code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M) |
---|---|---|---|---|---|
"""Module for manipulating tensor names according to Householder notation"""
RM_2_GREEK = {"a":"alpha", "b":"beta", "c":"gamma", "d":"delta",
"e":"epsilon", "h":"eta", "i":"iota", "k":"kappa", "l":"lambda",
"m":"mu", "n":"nu", "o":"omicron", "p":"pi", "r":"rho",
"s":"sigma", "t":"tau", "u":"upsilon", "x":"chi", "z":"zeta"}
GREEK_ALPHA = ["theta", "xi", "phi", "psi", "omega"] + RM_2_GREEK.values()
RM_ALPHA = []
LOWER_TOK = '_'
UPPER_TOK = '^'
TRANS_TOK = 'T'
HAT_TOK = 'H'
INV_TOK = '-'
UP_TOKS = [TRANS_TOK, HAT_TOK, INV_TOK]
def split_name (name):
"""Returns base, lower, upper strings of name"""
def _find (n, s):
idx = n.find(s)
return idx if idx > 0 else len(n)
base_end = min(map(lambda x: _find(name, x), [LOWER_TOK, UPPER_TOK]))
low_start = _find(name, LOWER_TOK)
up_start = _find(name, UPPER_TOK)
low_end = up_start if up_start > low_start else len(name)
up_end = low_start if up_start < low_start else len(name)
base = name[:base_end]
low = name[low_start:low_end].strip(LOWER_TOK + '{' + '}')
up = name[up_start:up_end].strip(UPPER_TOK + '{' + '}')
return base, low, up
def join_name (name, lower, upper, latex=False):
"""Returns name string with upper and lower indices.
>>> join_name("A", "0", "1")
'A_0^1'
>>> join_name("A", "bl", "2")
'A_bl^2'
>>> join_name("A", "bl", "2", latex=True)
'A_{bl}^2'
"""
ret_val = name
if lower:
ret_val += LOWER_TOK
if latex and len(lower) > 1:
ret_val += '{' + lower + '}'
else:
ret_val += lower
if upper:
ret_val += UPPER_TOK
if latex and len(upper) > 1:
ret_val += '{' + upper + '}'
else:
ret_val += upper
return ret_val
def add_idx (name, idx, latex=False):
"""Returns a name with an added index.
>>> add_idx("a", "r")
"a[r]"
>>> add_idx("a_0^2", "r")
"a[r]_0^2"
>>> add_idx("a[r]", "n")
"a[r][n]"
>>> add_idx("a", 3)
"a[3]"
>>> add_idx("a_02", "delta", latex=True)
"a[\delta]_{02}"
"""
b, l, u = split_name(name)
b = b + "[" + str(idx) + "]"
return join_name(b, l, u, latex)
def to_latex(name):
"""Returns name for latex printing.
>>> to_latex("A_01^T")
'A_{01}^T'
>>> to_latex("alpha")
'\\alpha'
"""
n, l, u = split_name(name)
if n in GREEK_ALPHA:
n = '\\' + n
return join_name(n, l, u, latex=True)
def base (name):
return split_name(name)[0]
def lower (name):
return split_name(name)[1]
def upper (name):
return split_name(name)[2]
def add_upper_ind (name, ind):
base, l, u = split_name(name)
if ind in u:
return name
u += ind
return join_name(base, l, ''.join(sorted(u)))
def set_upper_ind (name, ind):
base, l, _ = split_name(name)
return join_name(base, l, ind)
def add_lower_ind (name, ind):
base, l, u = split_name(name)
if ind in l:
return name
l += ind
return join_name(base, ''.join(sorted(l)), u)
def set_lower_ind (name, ind):
base, _, u = split_name(name)
return join_name(base, ind, ''.join(sorted(u)))
def transpose_name (name):
return add_upper_ind(name, TRANS_TOK)
def hat_name (name):
return add_upper_ind(name, HAT_TOK)
def inv_name (name):
return add_upper_ind(name, INV_TOK)
def householder_name (name, rank):
"""Returns True if the name conforms to Householder notation for the given rank, False otherwise.
>>> householder_name('A_1', 2)
True
>>> householder_name('foobar', 1)
False
"""
base, _, _ = split_name(name)
if base in ['0', '1']:
return True
elif rank == 0:
if base in GREEK_ALPHA:
return True
elif rank == 1:
if len(base) == 1 and base.isalpha() and base.islower():
return True
elif rank == 2:
if len(base) == 1 and base.isupper() and base.isalpha():
return True
return False
def convert_name (name, rank):
"""Converts a Householder name to a specific rank.
Will return non-Householder names unchanged.
>>> convert_name("A", 1)
'a'
>>> convert_name("alpha_01", 2)
'A_01'
>>> convert_name("foo_bar", 1)
'foo_bar'
"""
name_rank = rank_from_name(name)
if name_rank == rank:
return name
if not householder_name(name, name_rank):
return name
base, low, up = split_name(name)
if base[0] in ['0', '1']:
return name
elif name_rank == 2:
r_1 = join_name(base.lower(), low, up)
if rank == 1:
return r_1
elif rank == 0:
return convert_name(r_1, 0)
elif name_rank == 1:
if rank == 2:
return join_name(base.upper(), low, up)
if rank == 0:
sca_name = RM_2_GREEK.get(base, None)
if sca_name:
return join_name(sca_name, low, up)
else:
return join_name("s" + base, low, up)
elif name_rank == 0:
alpha_name = None
if base[0] == 's':
alpha_name = base[1:]
else:
for k, v in RM_2_GREEK.iteritems():
if v == base:
alpha_name = k
break
if alpha_name:
r_1 = join_name(alpha_name, low, up)
if rank == 2:
return convert_name(r_1, 2)
elif rank == 1:
return r_1
raise ValueError("Unable to convert name: %s." % name)
def rank_from_name (name):
base, _, _ = split_name(name)
if len(base) == 1 and base.isalpha():
if base.isupper():
return 2
if base.islower():
return 1
if base in GREEK_ALPHA:
return 0
return None
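# Illustrative examples of rank_from_name, following the Householder
# conventions used above (upper-case letter = matrix, lower-case letter =
# vector, Greek name = scalar):
#   rank_from_name("A_01")  -> 2
#   rank_from_name("x_2")   -> 1
#   rank_from_name("alpha") -> 0
#   rank_from_name("foo")   -> None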
| IgnitionProject/ignition | ignition/dsl/flame/tensors/tensor_names.py | Python | bsd-3-clause | 5,816 |
plugin_class = "shipmaster.plugins.ssh.ssh.SSHPlugin"
| damoti/shipmaster | shipmaster/plugins/ssh/__init__.py | Python | bsd-3-clause | 54 |
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, DateTime
import yaml
conf = yaml.safe_load(open('conf.yml'))
psql_cstr = conf['postgres_connection_str']
psql_schema = conf['postgres_schema']
engine = create_engine(psql_cstr)
conn = engine.connect()
metadata = MetaData(schema=psql_schema)
articles = Table('articles', metadata,
Column('id', Integer, primary_key=True),
Column('title', String),
Column('author', String),
Column('url', String),
Column('updated', DateTime)
)
# metadata.create_all(engine)
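# Illustrative usage (hypothetical values; assumes the table exists in Postgres):
# from datetime import datetime
# conn.execute(articles.insert().values(
#     id=1,
#     title='Example title',
#     author='Jane Doe',
#     url='https://example.com/article',
#     updated=datetime.utcnow(),
# ))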
| bwaite/fetch-to-atom | db.py | Python | mit | 623 |
x = input()
y = input()
print x + y
| cantora/pyc | p0tests/grader_tests/input_2.py | Python | gpl-3.0 | 36 |
#!/usr/bin/env python3
#
# cargo_plugin.py
#
# Copyright © 2016 Christian Hergert <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import gi
import threading
import os
gi.require_version('Ide', '1.0')
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Ide
_ = Ide.gettext
_CARGO = 'cargo'
class CargoBuildSystem(Ide.Object, Ide.BuildSystem, Gio.AsyncInitable):
project_file = GObject.Property(type=Gio.File)
def do_get_id(self):
return 'cargo'
def do_get_display_name(self):
return 'Cargo'
def do_init_async(self, io_priority, cancellable, callback, data):
task = Gio.Task.new(self, cancellable, callback)
# This is all done synchronously; doing it in a thread would probably
# be preferable, although it is unnecessary at this time.
try:
# Maybe this is a Cargo.toml
if self.props.project_file.get_basename() in ('Cargo.toml',):
task.return_boolean(True)
return
# Maybe this is a directory with a Cargo.toml
if self.props.project_file.query_file_type(0) == Gio.FileType.DIRECTORY:
child = self.props.project_file.get_child('Cargo.toml')
if child.query_exists(None):
self.props.project_file = child
task.return_boolean(True)
return
except Exception as ex:
task.return_error(ex)
task.return_error(Ide.NotSupportedError())
def do_init_finish(self, task):
return task.propagate_boolean()
def do_get_priority(self):
return 300
def locate_cargo_from_config(config):
cargo = _CARGO
if config:
runtime = config.get_runtime()
if config.getenv('CARGO'):
cargo = config.getenv('CARGO')
elif not runtime or not runtime.contains_program_in_path(_CARGO):
cargo_in_home = os.path.expanduser('~/.cargo/bin/cargo')
if os.path.exists(cargo_in_home):
cargo = cargo_in_home
return cargo
class CargoPipelineAddin(Ide.Object, Ide.BuildPipelineAddin):
"""
The CargoPipelineAddin is responsible for creating the necessary build
stages and attaching them to phases of the build pipeline.
"""
def do_load(self, pipeline):
context = self.get_context()
build_system = context.get_build_system()
# Ignore pipeline unless this is a cargo project
if type(build_system) != CargoBuildSystem:
return
cargo_toml = build_system.props.project_file.get_path()
config = pipeline.get_configuration()
system_type = config.get_device().get_system_type()
builddir = pipeline.get_builddir()
runtime = config.get_runtime()
# We might need to use cargo from ~/.cargo/bin
cargo = locate_cargo_from_config(config)
# Fetch dependencies so that we no longer need network access
fetch_launcher = pipeline.create_launcher()
fetch_launcher.setenv('CARGO_TARGET_DIR', builddir, True)
fetch_launcher.push_argv(cargo)
fetch_launcher.push_argv('fetch')
fetch_launcher.push_argv('--manifest-path')
fetch_launcher.push_argv(cargo_toml)
self.track(pipeline.connect_launcher(Ide.BuildPhase.DOWNLOADS, 0, fetch_launcher))
# Build the project
build_launcher = pipeline.create_launcher()
build_launcher.setenv('CARGO_TARGET_DIR', builddir, True)
build_launcher.push_argv(cargo)
build_launcher.push_argv('build')
build_launcher.push_argv('--verbose')
build_launcher.push_argv('--manifest-path')
build_launcher.push_argv(cargo_toml)
build_launcher.push_argv('--message-format')
build_launcher.push_argv('human')
if Ide.get_system_type() != system_type:
build_launcher.push_argv('--target')
build_launcher.push_argv(system_type)
if config.props.parallelism > 0:
build_launcher.push_argv('-j{}'.format(config.props.parallelism))
if not config.props.debug:
build_launcher.push_argv('--release')
clean_launcher = pipeline.create_launcher()
clean_launcher.setenv('CARGO_TARGET_DIR', builddir, True)
clean_launcher.push_argv(cargo)
clean_launcher.push_argv('clean')
clean_launcher.push_argv('--manifest-path')
clean_launcher.push_argv(cargo_toml)
build_stage = Ide.BuildStageLauncher.new(context, build_launcher)
build_stage.set_name(_("Building project"))
build_stage.set_clean_launcher(clean_launcher)
self.track(pipeline.connect(Ide.BuildPhase.BUILD, 0, build_stage))
class CargoBuildTarget(Ide.Object, Ide.BuildTarget):
def do_get_install_directory(self):
return None
def do_get_name(self):
return 'cargo-run'
def do_get_language(self):
return 'rust'
def do_get_argv(self):
context = self.get_context()
config_manager = context.get_configuration_manager()
config = config_manager.get_current()
cargo = locate_cargo_from_config(config)
# Pass the Cargo.toml path so that we don't
# need to run from the project directory.
project_file = context.get_project_file()
if project_file.get_basename() == 'Cargo.toml':
cargo_toml = project_file.get_path()
else:
cargo_toml = project_file.get_child('Cargo.toml').get_path()
return [cargo, 'run', '--manifest-path', cargo_toml]
def do_get_priority(self):
return 0
class CargoBuildTargetProvider(Ide.Object, Ide.BuildTargetProvider):
def do_get_targets_async(self, cancellable, callback, data):
task = Gio.Task.new(self, cancellable, callback)
task.set_priority(GLib.PRIORITY_LOW)
context = self.get_context()
build_system = context.get_build_system()
if type(build_system) != CargoBuildSystem:
task.return_error(GLib.Error('Not cargo build system',
domain=GLib.quark_to_string(Gio.io_error_quark()),
code=Gio.IOErrorEnum.NOT_SUPPORTED))
return
task.targets = [CargoBuildTarget(context=self.get_context())]
task.return_boolean(True)
def do_get_targets_finish(self, result):
if result.propagate_boolean():
return result.targets
class CargoDependencyUpdater(Ide.Object, Ide.DependencyUpdater):
def do_update_async(self, cancellable, callback, data):
task = Gio.Task.new(self, cancellable, callback)
task.set_priority(GLib.PRIORITY_LOW)
context = self.get_context()
build_system = context.get_build_system()
# Short circuit if not using cargo
if type(build_system) != CargoBuildSystem:
task.return_boolean(True)
return
build_manager = context.get_build_manager()
pipeline = build_manager.get_pipeline()
if not pipeline:
task.return_error(GLib.Error('Cannot update dependencies without build pipeline',
domain=GLib.quark_to_string(Gio.io_error_quark()),
code=Gio.IOErrorEnum.FAILED))
return
config_manager = context.get_configuration_manager()
config = config_manager.get_current()
cargo = locate_cargo_from_config(config)
cargo_toml = build_system.props.project_file.get_path()
launcher = pipeline.create_launcher()
launcher.setenv('CARGO_TARGET_DIR', pipeline.get_builddir(), True)
launcher.push_argv(cargo)
launcher.push_argv('update')
launcher.push_argv('--manifest-path')
launcher.push_argv(cargo_toml)
try:
subprocess = launcher.spawn()
subprocess.wait_check_async(None, self.wait_check_cb, task)
except Exception as ex:
task.return_error(ex)
def do_update_finish(self, result):
return result.propagate_boolean()
def wait_check_cb(self, subprocess, result, task):
try:
subprocess.wait_check_finish(result)
task.return_boolean(True)
except Exception as ex:
task.return_error(ex)
| albfan/gnome-builder | src/plugins/cargo/cargo_plugin.py | Python | gpl-3.0 | 9,062 |
"""Menu File description"""
from PyQt5.QtWidgets import qApp, QFileDialog, QMessageBox
from copy import deepcopy
from app.parser import *
from app.lattice import *
class GUIMenuFile():
def __init__(self, MainWindow):
self.mw = MainWindow
def __del__(self):
pass
def new_lattice(self):
"""Reset and init new lattice"""
self.mw.lattice = GUILattice()
self.mw.menu_edit.edit_lattice()
def dialog_open_lattice(self):
"""Read lattice from file"""
filename = QFileDialog.getOpenFileName(self.mw, 'Open Lattice', '', "Python Files (*.py);;All Files (*)", options=QFileDialog.DontUseNativeDialog)[0]
if filename == '':
return 0
self.mw.lattice = GUILattice()
# executing opened file
loc_dict = {}
try:
exec(open(filename).read(), globals(), loc_dict)
except Exception as err:
self.mw.error_window('Open Lattice File Error', str(err))
# parsing sequences and create elements list from cell
if 'cell' in loc_dict:
self.mw.lattice.cell = deepcopy(loc_dict['cell'])
lp = Parser()
self.mw.lattice.elements = lp.get_elements(self.mw.lattice.cell)
else:
self.mw.error_window('Open Lattice File Error', 'No sequence named "cell"')
# parsing method
if 'method' in loc_dict:
self.mw.lattice.method = deepcopy(loc_dict['method'])
# parsing beam
if 'beam' in loc_dict:
self.mw.lattice.beam = deepcopy(loc_dict['beam'])
# parsing tws0
if 'tws0' in loc_dict:
self.mw.lattice.tws0 = deepcopy(loc_dict['tws0'])
self.mw.menu_edit.edit_lattice()
def dialog_save_lattice(self):
"""Save lattice to file"""
filename = QFileDialog.getSaveFileName(self.mw, 'Save Lattice', '', "Python Files (*.py);;All Files (*)", options=QFileDialog.DontUseNativeDialog)
if filename[0] == '':
return 0
lp = Parser()
lines = lp.gui_lattice2input(self.mw.lattice, split=True)
if filename[1] == 'Python Files (*.py)' and filename[0][-3:] != '.py':
filename = filename[0] + '.py'
else:
filename = filename[0]
with open(filename, 'w') as fp:
fp.writelines(lines)
| ocelot-collab/ocelot | ocelot_gui/app/menu_file.py | Python | gpl-3.0 | 2,446 |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
from django.conf import settings
from django.core.cache import cache
import oauth.oauth as oauth
from common import api
from common import exception
from common import legacy
from common import oauth_util
from common import util
def get_user_from_request(request):
"""attempt to get a logged in user based on the request
most likely from a cookie
"""
nick = request.COOKIES.get(settings.USER_COOKIE, None)
token = request.COOKIES.get(settings.PASSWORD_COOKIE, None)
if nick:
# try to authenticate the dude via cookie
user = authenticate_user_cookie(nick, token)
return user
if (settings.API_ALLOW_LEGACY_AUTH
and 'personal_key' in request.REQUEST
and 'user' in request.REQUEST):
user = legacy.authenticate_user_personal_key(
request.REQUEST['user'], request.REQUEST['personal_key'])
if user:
user.legacy = True
return user
# we may not be authenticating via cookie, check oauth also
# Note: This will have the effect that any valid OAuth request
# will effectively be treated as a logged in user with one
# small difference, api users (via OAuth, etc) are given
# a permission level of read, write or delete which limits
# what they are able to do on the site.
if (('oauth_token' in request.REQUEST and 'oauth_consumer_key' in request.REQUEST)
or 'HTTP_AUTHORIZATION' in request.META):
oauth_util.verify_request(request)
user = oauth_util.get_api_user(request)
return user
return None
def lookup_user_auth_token(nick, token):
return cache.get("user_auth_token/%s/%s" % (nick, token))
def generate_user_auth_token(nick, password, timeout=(14 * 24 * 60 * 60)):
token = util.hash_generic(util.generate_uuid())
cache.set("user_auth_token/%s/%s" % (nick, token), password, timeout)
return token
def authenticate_user_cookie(nick, token):
user = api.actor_get_safe(api.ROOT, nick)
if not user:
return None
# users authenticated via cookie have full access
user.access_level = api.DELETE_ACCESS
cached_token = lookup_user_auth_token(user.nick, token)
if not cached_token:
return None
if user.password != cached_token:
return None
return user
def authenticate_user_login(nick, password):
user = api.actor_lookup_nick(api.ROOT, nick)
if not user:
return None
# users authenticated via login have full access
user.access_level = api.DELETE_ACCESS
if settings.DEBUG and password == "password":
return user
if user.password == util.hash_password(user.nick, password):
return user
# we're changing the password hashing, this will update their password
# to their new format
# TODO(termie): The settings.MANAGE_PY stuff below is so that the tests
# will continue to work with fixtures that have the passwords
# in clear text. We should probably remove this and change
# the passwords in the fixtures to be the legacy-style
# passwords.
if (user.password == util.hash_password_intermediate(user.nick, password)
or settings.MANAGE_PY and user.password == password):
logging.debug("updating password for intermediate user: %s", user.nick)
user = api.actor_update_intermediate_password(api.ROOT,
user.nick,
password)
# a little repeat of above since we have a new user instance now
user.access_level = api.DELETE_ACCESS
return user
return None
def lookup_user_by_login(login, password):
"""Looks up user by a given login. Returns None on failure.
login - can be either nick or confirmed email
password - password associated with the user
"""
try:
current_user = authenticate_user_login(login, password)
if current_user:
return current_user
except exception.ValidationError:
pass # let's try the email address next
# login can be confirmed email address
actor_ref = api.actor_lookup_email(api.ROOT, login)
if actor_ref:
return authenticate_user_login(actor_ref.nick, password)
return None
def set_user_cookie(response, user, remember=False):
if remember:
two_weeks = datetime.datetime.utcnow() + datetime.timedelta(days=14)
expires = two_weeks.strftime("%a %d-%b-%y %H:%M:%S GMT")
else:
expires = None
auth_token = generate_user_auth_token(user.nick, user.password)
if settings.COOKIE_DOMAIN == "localhost":
response.set_cookie(settings.USER_COOKIE,
user.nick,
expires=expires,
path=settings.COOKIE_PATH)
response.set_cookie(settings.PASSWORD_COOKIE,
auth_token,
expires=expires,
path=settings.COOKIE_PATH)
else:
response.set_cookie(settings.USER_COOKIE,
user.nick,
expires=expires,
path=settings.COOKIE_PATH,
domain=settings.COOKIE_DOMAIN)
response.set_cookie(settings.PASSWORD_COOKIE,
auth_token,
expires=expires,
path=settings.COOKIE_PATH,
domain=settings.COOKIE_DOMAIN)
return response
def clear_user_cookie(response):
if settings.COOKIE_DOMAIN == "localhost":
response.delete_cookie(settings.USER_COOKIE, path=settings.COOKIE_PATH)
response.delete_cookie(settings.PASSWORD_COOKIE, path=settings.COOKIE_PATH)
else:
response.delete_cookie(settings.USER_COOKIE,
path=settings.COOKIE_PATH,
domain=settings.COOKIE_DOMAIN)
response.delete_cookie(settings.PASSWORD_COOKIE,
path=settings.COOKIE_PATH,
domain=settings.COOKIE_DOMAIN)
return response
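# Illustrative sketch of how these helpers fit together in a login view
# (the view function and redirect target are hypothetical):
#
#   def login_view(request):
#       user = lookup_user_by_login(request.POST['login'], request.POST['password'])
#       if not user:
#           return render_login_error()
#       response = HttpResponseRedirect('/')
#       return set_user_cookie(response, user, remember=True)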
| jimpick/jaikuengine | common/user.py | Python | apache-2.0 | 6,537 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import contextlib
from urllib import parse
import apt
import requests_unixsocket
from requests import exceptions
def is_snap_installed(snap):
local_snap_info = None
with contextlib.suppress(exceptions.HTTPError):
local_snap_info = _get_local_snap_info(_get_parsed_snap(snap)[0])
return local_snap_info is not None
def _get_parsed_snap(snap):
if '/' in snap:
sep_index = snap.find('/')
snap_name = snap[:sep_index]
snap_channel = snap[sep_index+1:]
else:
snap_name = snap
snap_channel = ''
return snap_name, snap_channel
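# Illustrative behaviour of _get_parsed_snap:
#   _get_parsed_snap('core')            -> ('core', '')
#   _get_parsed_snap('snapcraft/edge')  -> ('snapcraft', 'edge')
#   _get_parsed_snap('go/1.10/stable')  -> ('go', '1.10/stable')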
def _get_snapd_socket_path_template():
return 'http+unix://%2Frun%2Fsnapd.socket/v2/{}'
def _get_local_snap_info(name):
slug = 'snaps/{}'.format(parse.quote(name, safe=''))
url = _get_snapd_socket_path_template().format(slug)
with requests_unixsocket.Session() as session:
snap_info = session.get(url)
snap_info.raise_for_status()
return snap_info.json()['result']
def is_package_installed(name):
with apt.Cache() as apt_cache:
return apt_cache[name].installed
| elopio/snapcraft | tests/integration/repo.py | Python | gpl-3.0 | 1,775 |
# -*- coding: utf-8 -*-
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class CloneConfig(AppConfig):
name = 'clone'
verbose_name = _('ZX Spectrum clones database')
| kgbplus/trdos_site | clone/apps.py | Python | gpl-3.0 | 218 |
from django.conf import settings
from django.core.urlresolvers import resolve, Resolver404
from django.shortcuts import redirect
from djangoproject import urls as frontend_urls
from store.models import Merchant
import sys
class MerchantSubdomainMiddleware(object):
def process_request(self, request):
path = request.path
domain = request.META['HTTP_HOST']
pieces = domain.split('.')
redirect_path = "http://{0}{1}".format(
settings.DEFAULT_SITE_DOMAIN, path)
if domain == settings.DEFAULT_SITE_DOMAIN:
return None
try:
resolve(path, frontend_urls)
except Resolver404:
# The slashes are not being appended before getting here
resolve(u"{0}/".format(path), frontend_urls)
try:
merchant = Merchant.objects.get(subdomain=pieces[0])
except Merchant.DoesNotExist:
return redirect(redirect_path)
request.Merchant = merchant
return None
| fulcircle/djangostore | store/middleware/subdomains.py | Python | gpl-2.0 | 1,012 |
# -*- coding: utf-8 -*-
# std lib
import logging
# 3rd parties
from flask import Flask
from flask_socketio import SocketIO
# local
from sentinel_gui import settings
def setup_logging(app):
logger = logging.getLogger('sentinel_gui')
loglevel = logging.DEBUG if settings.DEBUG else logging.INFO
logger.setLevel(loglevel)
lh = logging.StreamHandler()
lh.setLevel(loglevel)
lh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s'))
logger.addHandler(lh)
app.logger.addHandler(lh)
app = Flask(__name__)
app.config.from_object('sentinel_gui.settings')
setup_logging(app)
socketio = SocketIO(app)
from sentinel_gui.core import models
sentinel_manager = models.SentinelManager()
from sentinel_gui import views
if __name__ == '__main__':
socketio.run(app)
| cgarciaarano/sentinel_gui | sentinel_gui/web.py | Python | gpl-3.0 | 826 |
#!/usr/bin/env python
#
# Plugins.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
class Plugins(list):
def __init__(self):
list.__init__(self)
@property
def length(self):
return len(self)
def __getattr__(self, key):
return self.namedItem(key)
def __getitem__(self, key):
try:
key = int(key)
return self.item(key)
except ValueError:
return self.namedItem(key)
def item(self, index):
if index >= self.length:
return None
return list.__getitem__(self, index)
def namedItem(self, name):
index = 0
while index < self.length:
p = self.item(index)
if p['name'] == name:
return p
index += 1
return None
def refresh(self, reloadDocuments = False):
pass
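# Illustrative usage, mirroring the DOM navigator.plugins interface that this
# class emulates (the plugin dict shown is hypothetical):
#   plugins = Plugins()
#   plugins.append({'name': 'Shockwave Flash', 'filename': 'libflashplayer.so'})
#   plugins.length              # -> 1
#   plugins[0]                  # -> the dict above
#   plugins['Shockwave Flash']  # -> the dict above (lookup by name)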
| pdelsante/thug | thug/DOM/Plugins.py | Python | gpl-2.0 | 1,471 |
#!/usr/bin/python3
#
# Copyright (C) 2018 Rafael Senties Martinelli
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk
from gi.repository import GObject
_LEFT_CLICK_ID = 1
_RIGHT_CLICK_ID = 3
class ColorToolItem(Gtk.ToolItem):
def __init__(self, start_color, window_parent=None, position=0):
super().__init__()
self.set_position(position)
self.__picked = False
self._rectangle_width = 50
self._rectangle_height = 50
self.__menu = Gtk.Menu()
gtk_image = Gtk.Image()
gtk_image.set_from_stock(Gtk.STOCK_DELETE, 1)
self.__delete_menuitem = Gtk.ImageMenuItem('Delete')
self.__delete_menuitem.set_image(gtk_image)
self.__delete_menuitem.connect('activate', self.__on_delete_menuitem_clicked)
self.__menu.append(self.__delete_menuitem)
self.__menu.show_all()
self.__color_selector = Gtk.ColorSelectionDialog("Select a color", window_parent)
self.set_color(start_color)
self.__drawing_area = Gtk.DrawingArea()
self.__drawing_area.set_size_request(self._rectangle_width, self._rectangle_height)
self.__drawing_area.connect('draw', self.__on_draw)
self.__drawing_area.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
self.__drawing_area.connect('button-press-event', self.__on_clicked)
self.add(self.__drawing_area)
def set_color(self, color):
if isinstance(color, str):
self.__current_color = Gdk.RGBA()
self.__current_color.parse(color)
else:
self.__current_color = color
def set_picked(self, status):
self.__picked = status
def set_position(self, column):
self.__position = column
def get_position(self):
return self.__position
def get_picked(self):
return self.__picked
def get_current_rgba(self):
return self.__current_color
def __on_draw(self, drawing_area, cr):
cr.rectangle(0, 0, self._rectangle_width, self._rectangle_height)
cr.set_source_rgb(self.__current_color.red, self.__current_color.green, self.__current_color.blue)
cr.fill()
if self.__picked:
cr.set_font_size(40)
cr.set_source_rgb(0, 0, 0)
cr.move_to(5, self._rectangle_height-5)
cr.show_text("✓")
drawing_area.queue_draw_area(0, 0, self._rectangle_width, self._rectangle_height)
return True
def __on_clicked(self, widget, event):
if event.button == _LEFT_CLICK_ID:
if event.type == Gdk.EventType.BUTTON_PRESS:
self.emit('picked')
elif event.type == Gdk.EventType._2BUTTON_PRESS:
response = self.__color_selector.run()
if response == Gtk.ResponseType.OK:
gdk_color = self.__color_selector.get_color_selection().get_current_color()
rgba_color = Gdk.RGBA.from_color(gdk_color)
self.set_color(rgba_color)
self.emit('color-changed')
self.emit('picked')
self.__color_selector.hide()
elif event.button == _RIGHT_CLICK_ID:
self.__menu.popup(None, None, None, None, 0, Gtk.get_current_event_time())
def __on_delete_menuitem_clicked(self, widget):
self.emit("request-delete")
# It is unclear why this could not be defined in __init__, as is done for the ColorChooserToolbar
GObject.type_register(ColorToolItem)
GObject.signal_new('color-changed', ColorToolItem, GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, ())
GObject.type_register(ColorToolItem)
GObject.signal_new('picked', ColorToolItem, GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, ())
GObject.type_register(ColorToolItem)
GObject.signal_new('request-delete', ColorToolItem, GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, ())
| rsm-gh/alienware-kbl | usr/lib/python3/AKBL/Addons/GUI/ColorChooserToolbar/ColorToolItem.py | Python | gpl-3.0 | 4,805 |
from .bang import Bang, Uninit
from .gui_main import MFPGUI
| bgribble/mfp | mfp/__init__.py | Python | gpl-2.0 | 61 |
# -*- coding: utf-8 -*-
"""
direct PAS
Python Application Services
----------------------------------------------------------------------------
(C) direct Netware Group - All rights reserved
https://www.direct-netware.de/redirect?pas;media
The following license agreement remains valid unless any additions or
changes are being made by direct Netware Group in a written form.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
----------------------------------------------------------------------------
https://www.direct-netware.de/redirect?licenses;gpl
----------------------------------------------------------------------------
setup.py
"""
def get_version():
"""
Returns the version currently in development.
:return: (str) Version string
:since: v0.1.01
"""
return "v0.2.00"
#
from dNG.distutils.command.build_py import BuildPy
from dNG.distutils.command.install_data import InstallData
from dNG.distutils.temporary_directory import TemporaryDirectory
from distutils.core import setup
from os import path
with TemporaryDirectory(dir = ".") as build_directory:
parameters = { "pasMediaVersion": get_version() }
InstallData.set_build_target_path(build_directory)
InstallData.set_build_target_parameters(parameters)
_build_path = path.join(build_directory, "src")
setup(name = "pas_media",
version = get_version(),
description = "Python Application Services",
long_description = """"pas_media" provides the abstract classes to query metadata of multimedia files.""",
author = "direct Netware Group et al.",
author_email = "[email protected]",
license = "GPLv2+",
url = "https://www.direct-netware.de/redirect?pas;media",
platforms = [ "any" ],
package_dir = { "": _build_path },
packages = [ "dNG" ],
data_files = [ ( "docs", [ "LICENSE", "README" ]) ],
# Override build_py to first run builder.py over all PAS modules
cmdclass = { "build_py": BuildPy,
"install_data": InstallData
}
)
#
| dNG-git/pas_media | setup.py | Python | gpl-2.0 | 2,756 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import logging
from functools import reduce
from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinValueValidator, MaxValueValidator
from common.utils import get_signer
from .base import AssetUser
from .asset import Asset
__all__ = ['AdminUser', 'SystemUser']
logger = logging.getLogger(__name__)
signer = get_signer()
class AdminUser(AssetUser):
"""
A privileged user that Ansible can use to push system users and so on
"""
BECOME_METHOD_CHOICES = (
('sudo', 'sudo'),
('su', 'su'),
)
become = models.BooleanField(default=True)
become_method = models.CharField(choices=BECOME_METHOD_CHOICES, default='sudo', max_length=4)
become_user = models.CharField(default='root', max_length=64)
_become_pass = models.CharField(default='', blank=True, max_length=128)
CONNECTIVITY_CACHE_KEY = '_ADMIN_USER_CONNECTIVE_{}'
_prefer = "admin_user"
def __str__(self):
return self.name
@property
def become_pass(self):
password = signer.unsign(self._become_pass)
if password:
return password
else:
return ""
@become_pass.setter
def become_pass(self, password):
self._become_pass = signer.sign(password)
@property
def become_info(self):
if self.become:
info = {
"method": self.become_method,
"user": self.become_user,
"pass": self.become_pass,
}
else:
info = None
return info
class Meta:
ordering = ['name']
unique_together = [('name', 'org_id')]
verbose_name = _("Admin user")
@classmethod
def generate_fake(cls, count=10):
from random import seed
import forgery_py
from django.db import IntegrityError
seed()
for i in range(count):
obj = cls(name=forgery_py.name.full_name(),
username=forgery_py.internet.user_name(),
password=forgery_py.lorem_ipsum.word(),
comment=forgery_py.lorem_ipsum.sentence(),
created_by='Fake')
try:
obj.save()
logger.debug('Generate fake asset group: %s' % obj.name)
except IntegrityError:
print('Error continue')
continue
class SystemUser(AssetUser):
PROTOCOL_SSH = 'ssh'
PROTOCOL_RDP = 'rdp'
PROTOCOL_TELNET = 'telnet'
PROTOCOL_VNC = 'vnc'
PROTOCOL_CHOICES = (
(PROTOCOL_SSH, 'ssh'),
(PROTOCOL_RDP, 'rdp'),
(PROTOCOL_TELNET, 'telnet (beta)'),
(PROTOCOL_VNC, 'vnc'),
)
LOGIN_AUTO = 'auto'
LOGIN_MANUAL = 'manual'
LOGIN_MODE_CHOICES = (
(LOGIN_AUTO, _('Automatic login')),
(LOGIN_MANUAL, _('Manually login'))
)
nodes = models.ManyToManyField('assets.Node', blank=True, verbose_name=_("Nodes"))
assets = models.ManyToManyField('assets.Asset', blank=True, verbose_name=_("Assets"))
priority = models.IntegerField(default=20, verbose_name=_("Priority"), validators=[MinValueValidator(1), MaxValueValidator(100)])
protocol = models.CharField(max_length=16, choices=PROTOCOL_CHOICES, default='ssh', verbose_name=_('Protocol'))
auto_push = models.BooleanField(default=True, verbose_name=_('Auto push'))
sudo = models.TextField(default='/bin/whoami', verbose_name=_('Sudo'))
shell = models.CharField(max_length=64, default='/bin/bash', verbose_name=_('Shell'))
login_mode = models.CharField(choices=LOGIN_MODE_CHOICES, default=LOGIN_AUTO, max_length=10, verbose_name=_('Login mode'))
cmd_filters = models.ManyToManyField('CommandFilter', related_name='system_users', verbose_name=_("Command filter"), blank=True)
def __str__(self):
return '{0.name}({0.username})'.format(self)
@property
def login_mode_display(self):
return self.get_login_mode_display()
def is_need_push(self):
if self.auto_push and self.protocol in [self.PROTOCOL_SSH, self.PROTOCOL_RDP]:
return True
else:
return False
@property
def cmd_filter_rules(self):
from .cmd_filter import CommandFilterRule
rules = CommandFilterRule.objects.filter(
filter__in=self.cmd_filters.all()
).distinct()
return rules
def is_command_can_run(self, command):
for rule in self.cmd_filter_rules:
action, matched_cmd = rule.match(command)
if action == rule.ACTION_ALLOW:
return True, None
elif action == rule.ACTION_DENY:
return False, matched_cmd
return True, None
def get_all_assets(self):
from .node import Node
args = [Q(systemuser=self)]
pattern = set()
nodes_keys = self.nodes.all().values_list('key', flat=True)
nodes_keys = Node.clean_children_keys(nodes_keys)
for key in nodes_keys:
pattern.add(r'^{0}$|^{0}:'.format(key))
pattern = '|'.join(list(pattern))
if pattern:
args.append(Q(nodes__key__regex=pattern))
args = reduce(lambda x, y: x | y, args)
assets = Asset.objects.filter(args).distinct()
return assets
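# Illustrative example of the node-key pattern built in get_all_assets: for
# cleaned node keys ['1:1', '1:10'] the regex is (in some order)
# r'^1:1$|^1:1:|^1:10$|^1:10:', so each node matches itself and its
# descendants (e.g. '1:1:5') but not sibling keys that merely share a
# prefix (e.g. '1:100').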
class Meta:
ordering = ['name']
unique_together = [('name', 'org_id')]
verbose_name = _("System user")
@classmethod
def generate_fake(cls, count=10):
from random import seed
import forgery_py
from django.db import IntegrityError
seed()
for i in range(count):
obj = cls(name=forgery_py.name.full_name(),
username=forgery_py.internet.user_name(),
password=forgery_py.lorem_ipsum.word(),
comment=forgery_py.lorem_ipsum.sentence(),
created_by='Fake')
try:
obj.save()
logger.debug('Generate fake asset group: %s' % obj.name)
except IntegrityError:
print('Error continue')
continue
| eli261/jumpserver | apps/assets/models/user.py | Python | gpl-2.0 | 6,263 |
# Python 2 and 3:
try:
# Python 3 only:
from urllib.parse import urlencode, urlsplit, parse_qs, unquote
except ImportError:
# Python 2 only:
from urlparse import parse_qs, urlsplit
from urllib import urlencode, unquote
| fasihahmad/django-rest-framework-related-views | rest_framework_related/py2_3.py | Python | gpl-3.0 | 239 |
import pexpect
p = pexpect.spawn(["login"])
p.expect("login:")
p.sendline("wrong")
p.expect("Password:")
p.sendline("wrong")
p.expect("Login incorrect")
| masahir0y/buildroot-yamada | support/testing/tests/package/sample_python_pexpect.py | Python | gpl-2.0 | 154 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
When a document is converted to plain-text from PDF,
certain characters may result in the plain-text, that are
either unwanted, or broken. These characters need to be corrected
or removed. Examples are, certain control characters that would
be illegal in XML and must be removed; TeX ligatures (etc); broken
accents such as umlauts on letters that must be corrected.
This module provides a dictionary of (unwanted) characters to look
for and the characters that should be used to replace them
({ seek -> replace, } pairs), plus helper functions that apply these
replacements to plain-text lines.
"""
import re
import subprocess
from six import iteritems
from invenio.config import CFG_PATH_PDFTOTEXT
from invenio.legacy.docextract.utils import write_message
# a dictionary of undesirable characters and their replacements:
UNDESIRABLE_CHAR_REPLACEMENTS = {
# Control characters not allowed in XML:
u'\u2028' : u"",
u'\u2029' : u"",
u'\u202A' : u"",
u'\u202B' : u"",
u'\u202C' : u"",
u'\u202D' : u"",
u'\u202E' : u"",
u'\u206A' : u"",
u'\u206B' : u"",
u'\u206C' : u"",
u'\u206D' : u"",
u'\u206E' : u"",
u'\u206F' : u"",
u'\uFFF9' : u"",
u'\uFFFA' : u"",
u'\uFFFB' : u"",
u'\uFFFC' : u"",
u'\uFEFF' : u"",
# Remove the result of an bad UTF-8 character
u'\uFFFF' : u"",
# Language Tag Code Points:
u"\U000E0000" : u"",
u"\U000E0001" : u"",
u"\U000E0002" : u"",
u"\U000E0003" : u"",
u"\U000E0004" : u"",
u"\U000E0005" : u"",
u"\U000E0006" : u"",
u"\U000E0007" : u"",
u"\U000E0008" : u"",
u"\U000E0009" : u"",
u"\U000E000A" : u"",
u"\U000E000B" : u"",
u"\U000E000C" : u"",
u"\U000E000D" : u"",
u"\U000E000E" : u"",
u"\U000E000F" : u"",
u"\U000E0010" : u"",
u"\U000E0011" : u"",
u"\U000E0012" : u"",
u"\U000E0013" : u"",
u"\U000E0014" : u"",
u"\U000E0015" : u"",
u"\U000E0016" : u"",
u"\U000E0017" : u"",
u"\U000E0018" : u"",
u"\U000E0019" : u"",
u"\U000E001A" : u"",
u"\U000E001B" : u"",
u"\U000E001C" : u"",
u"\U000E001D" : u"",
u"\U000E001E" : u"",
u"\U000E001F" : u"",
u"\U000E0020" : u"",
u"\U000E0021" : u"",
u"\U000E0022" : u"",
u"\U000E0023" : u"",
u"\U000E0024" : u"",
u"\U000E0025" : u"",
u"\U000E0026" : u"",
u"\U000E0027" : u"",
u"\U000E0028" : u"",
u"\U000E0029" : u"",
u"\U000E002A" : u"",
u"\U000E002B" : u"",
u"\U000E002C" : u"",
u"\U000E002D" : u"",
u"\U000E002E" : u"",
u"\U000E002F" : u"",
u"\U000E0030" : u"",
u"\U000E0031" : u"",
u"\U000E0032" : u"",
u"\U000E0033" : u"",
u"\U000E0034" : u"",
u"\U000E0035" : u"",
u"\U000E0036" : u"",
u"\U000E0037" : u"",
u"\U000E0038" : u"",
u"\U000E0039" : u"",
u"\U000E003A" : u"",
u"\U000E003B" : u"",
u"\U000E003C" : u"",
u"\U000E003D" : u"",
u"\U000E003E" : u"",
u"\U000E003F" : u"",
u"\U000E0040" : u"",
u"\U000E0041" : u"",
u"\U000E0042" : u"",
u"\U000E0043" : u"",
u"\U000E0044" : u"",
u"\U000E0045" : u"",
u"\U000E0046" : u"",
u"\U000E0047" : u"",
u"\U000E0048" : u"",
u"\U000E0049" : u"",
u"\U000E004A" : u"",
u"\U000E004B" : u"",
u"\U000E004C" : u"",
u"\U000E004D" : u"",
u"\U000E004E" : u"",
u"\U000E004F" : u"",
u"\U000E0050" : u"",
u"\U000E0051" : u"",
u"\U000E0052" : u"",
u"\U000E0053" : u"",
u"\U000E0054" : u"",
u"\U000E0055" : u"",
u"\U000E0056" : u"",
u"\U000E0057" : u"",
u"\U000E0058" : u"",
u"\U000E0059" : u"",
u"\U000E005A" : u"",
u"\U000E005B" : u"",
u"\U000E005C" : u"",
u"\U000E005D" : u"",
u"\U000E005E" : u"",
u"\U000E005F" : u"",
u"\U000E0060" : u"",
u"\U000E0061" : u"",
u"\U000E0062" : u"",
u"\U000E0063" : u"",
u"\U000E0064" : u"",
u"\U000E0065" : u"",
u"\U000E0066" : u"",
u"\U000E0067" : u"",
u"\U000E0068" : u"",
u"\U000E0069" : u"",
u"\U000E006A" : u"",
u"\U000E006B" : u"",
u"\U000E006C" : u"",
u"\U000E006D" : u"",
u"\U000E006E" : u"",
u"\U000E006F" : u"",
u"\U000E0070" : u"",
u"\U000E0071" : u"",
u"\U000E0072" : u"",
u"\U000E0073" : u"",
u"\U000E0074" : u"",
u"\U000E0075" : u"",
u"\U000E0076" : u"",
u"\U000E0077" : u"",
u"\U000E0078" : u"",
u"\U000E0079" : u"",
u"\U000E007A" : u"",
u"\U000E007B" : u"",
u"\U000E007C" : u"",
u"\U000E007D" : u"",
u"\U000E007E" : u"",
u"\U000E007F" : u"",
# Musical Notation Scoping
u"\U0001D173" : u"",
u"\U0001D174" : u"",
u"\U0001D175" : u"",
u"\U0001D176" : u"",
u"\U0001D177" : u"",
u"\U0001D178" : u"",
u"\U0001D179" : u"",
u"\U0001D17A" : u"",
u'\u0000' : u"", # NULL
u'\u0001' : u"", # START OF HEADING
# START OF TEXT & END OF TEXT:
u'\u0002' : u"",
u'\u0003' : u"",
u'\u0004' : u"", # END OF TRANSMISSION
# ENQ and ACK
u'\u0005' : u"",
u'\u0006' : u"",
u'\u0007' : u"", # BELL
u'\u0008' : u"", # BACKSPACE
# SHIFT-IN & SHIFT-OUT
u'\u000E' : u"",
u'\u000F' : u"",
# Other controls:
u'\u0010' : u"", # DATA LINK ESCAPE
u'\u0011' : u"", # DEVICE CONTROL ONE
u'\u0012' : u"", # DEVICE CONTROL TWO
u'\u0013' : u"", # DEVICE CONTROL THREE
u'\u0014' : u"", # DEVICE CONTROL FOUR
u'\u0015' : u"", # NEGATIVE ACK
u'\u0016' : u"", # SYNCRONOUS IDLE
u'\u0017' : u"", # END OF TRANSMISSION BLOCK
u'\u0018' : u"", # CANCEL
u'\u0019' : u"", # END OF MEDIUM
u'\u001A' : u"", # SUBSTITUTE
u'\u001B' : u"", # ESCAPE
u'\u001C' : u"", # INFORMATION SEPARATOR FOUR (file separator)
u'\u001D' : u"", # INFORMATION SEPARATOR THREE (group separator)
u'\u001E' : u"", # INFORMATION SEPARATOR TWO (record separator)
u'\u001F' : u"", # INFORMATION SEPARATOR ONE (unit separator)
# \r -> remove it
u'\r' : u"",
# Strange parantheses - change for normal:
u'\x1c' : u'(',
u'\x1d' : u')',
# Some ff from tex:
u'\u0013\u0010' : u'\u00ED',
u'\x0b' : u'ff',
# fi from tex:
u'\x0c' : u'fi',
# ligatures from TeX:
u'\ufb00' : u'ff',
u'\ufb01' : u'fi',
u'\ufb02' : u'fl',
u'\ufb03' : u'ffi',
u'\ufb04' : u'ffl',
# Superscripts from TeX
u'\u2212' : u'-',
u'\u2013' : u'-',
# Word style speech marks:
u'\u201c ': u'"',
u'\u201d' : u'"',
u'\u201c' : u'"',
# pdftotext has problems with umlaut and prints it as diaeresis
# followed by a letter:correct it
# (Optional space between char and letter - fixes broken
# line examples)
u'\u00A8 a' : u'\u00E4',
u'\u00A8 e' : u'\u00EB',
u'\u00A8 i' : u'\u00EF',
u'\u00A8 o' : u'\u00F6',
u'\u00A8 u' : u'\u00FC',
u'\u00A8 y' : u'\u00FF',
u'\u00A8 A' : u'\u00C4',
u'\u00A8 E' : u'\u00CB',
u'\u00A8 I' : u'\u00CF',
u'\u00A8 O' : u'\u00D6',
u'\u00A8 U' : u'\u00DC',
u'\u00A8 Y' : u'\u0178',
u'\xA8a' : u'\u00E4',
u'\xA8e' : u'\u00EB',
u'\xA8i' : u'\u00EF',
u'\xA8o' : u'\u00F6',
u'\xA8u' : u'\u00FC',
u'\xA8y' : u'\u00FF',
u'\xA8A' : u'\u00C4',
u'\xA8E' : u'\u00CB',
u'\xA8I' : u'\u00CF',
u'\xA8O' : u'\u00D6',
u'\xA8U' : u'\u00DC',
u'\xA8Y' : u'\u0178',
# More umlaut mess to correct:
u'\x7fa' : u'\u00E4',
u'\x7fe' : u'\u00EB',
u'\x7fi' : u'\u00EF',
u'\x7fo' : u'\u00F6',
u'\x7fu' : u'\u00FC',
u'\x7fy' : u'\u00FF',
u'\x7fA' : u'\u00C4',
u'\x7fE' : u'\u00CB',
u'\x7fI' : u'\u00CF',
u'\x7fO' : u'\u00D6',
u'\x7fU' : u'\u00DC',
u'\x7fY' : u'\u0178',
u'\x7f a' : u'\u00E4',
u'\x7f e' : u'\u00EB',
u'\x7f i' : u'\u00EF',
u'\x7f o' : u'\u00F6',
u'\x7f u' : u'\u00FC',
u'\x7f y' : u'\u00FF',
u'\x7f A' : u'\u00C4',
u'\x7f E' : u'\u00CB',
u'\x7f I' : u'\u00CF',
u'\x7f O' : u'\u00D6',
u'\x7f U' : u'\u00DC',
u'\x7f Y' : u'\u0178',
# pdftotext: fix accute accent:
u'\x13a' : u'\u00E1',
u'\x13e' : u'\u00E9',
u'\x13i' : u'\u00ED',
u'\x13o' : u'\u00F3',
u'\x13u' : u'\u00FA',
u'\x13y' : u'\u00FD',
u'\x13A' : u'\u00C1',
u'\x13E' : u'\u00C9',
u'\x13I' : u'\u00CD',
u'\x13ı' : u'\u00ED', # Lower case turkish 'i' (dotless i)
u'\x13O' : u'\u00D3',
u'\x13U' : u'\u00DA',
u'\x13Y' : u'\u00DD',
u'\x13 a' : u'\u00E1',
u'\x13 e' : u'\u00E9',
u'\x13 i' : u'\u00ED',
u'\x13 o' : u'\u00F3',
u'\x13 u' : u'\u00FA',
u'\x13 y' : u'\u00FD',
u'\x13 A' : u'\u00C1',
u'\x13 E' : u'\u00C9',
u'\x13 I' : u'\u00CD',
u'\x13 ı' : u'\u00ED',
u'\x13 O' : u'\u00D3',
u'\x13 U' : u'\u00DA',
u'\x13 Y' : u'\u00DD',
u'\u00B4 a' : u'\u00E1',
u'\u00B4 e' : u'\u00E9',
u'\u00B4 i' : u'\u00ED',
u'\u00B4 o' : u'\u00F3',
u'\u00B4 u' : u'\u00FA',
u'\u00B4 y' : u'\u00FD',
u'\u00B4 A' : u'\u00C1',
u'\u00B4 E' : u'\u00C9',
u'\u00B4 I' : u'\u00CD',
u'\u00B4 ı' : u'\u00ED',
u'\u00B4 O' : u'\u00D3',
u'\u00B4 U' : u'\u00DA',
u'\u00B4 Y' : u'\u00DD',
u'\u00B4a' : u'\u00E1',
u'\u00B4e' : u'\u00E9',
u'\u00B4i' : u'\u00ED',
u'\u00B4o' : u'\u00F3',
u'\u00B4u' : u'\u00FA',
u'\u00B4y' : u'\u00FD',
u'\u00B4A' : u'\u00C1',
u'\u00B4E' : u'\u00C9',
u'\u00B4I' : u'\u00CD',
u'\u00B4ı' : u'\u00ED',
u'\u00B4O' : u'\u00D3',
u'\u00B4U' : u'\u00DA',
u'\u00B4Y' : u'\u00DD',
# pdftotext: fix grave accent:
u'\u0060 a' : u'\u00E0',
u'\u0060 e' : u'\u00E8',
u'\u0060 i' : u'\u00EC',
u'\u0060 o' : u'\u00F2',
u'\u0060 u' : u'\u00F9',
u'\u0060 A' : u'\u00C0',
u'\u0060 E' : u'\u00C8',
u'\u0060 I' : u'\u00CC',
u'\u0060 O' : u'\u00D2',
u'\u0060 U' : u'\u00D9',
u'\u0060a' : u'\u00E0',
u'\u0060e' : u'\u00E8',
u'\u0060i' : u'\u00EC',
u'\u0060o' : u'\u00F2',
u'\u0060u' : u'\u00F9',
u'\u0060A' : u'\u00C0',
u'\u0060E' : u'\u00C8',
u'\u0060I' : u'\u00CC',
u'\u0060O' : u'\u00D2',
u'\u0060U' : u'\u00D9',
# \02C7 : caron
u'\u02C7C' : u'\u010C',
u'\u02C7c' : u'\u010D',
u'\u02C7S' : u'\u0160',
u'\u02C7s' : u'\u0161',
u'\u02C7Z' : u'\u017D',
u'\u02C7z' : u'\u017E',
# \027 : aa (a with ring above)
u'\u02DAa' : u'\u00E5',
u'\u02DAA' : u'\u00C5',
# \030 : cedilla
u'\u0327c' : u'\u00E7',
u'\u0327C' : u'\u00C7',
# \02DC : tilde
u'\u02DCn' : u'\u00F1',
u'\u02DCN' : u'\u00D1',
u'\u02DCo' : u'\u00F5',
u'\u02DCO' : u'\u00D5',
u'\u02DCa' : u'\u00E3',
u'\u02DCA' : u'\u00C3',
u'\u02DCs' : u'\u0303s', # Combining tilde with 's'
}
UNDESIRABLE_STRING_REPLACEMENTS = [
(u'\u201c ', '"'),
]
def replace_undesirable_characters(line):
"""
Replace certain bad characters in a text line.
@param line: (string) the text line in which bad characters are to
be replaced.
@return: (string) the text line after the bad characters have been
replaced.
"""
for bad_string, replacement in UNDESIRABLE_STRING_REPLACEMENTS:
line = line.replace(bad_string, replacement)
for bad_char, replacement in iteritems(UNDESIRABLE_CHAR_REPLACEMENTS):
line = line.replace(bad_char, replacement)
return line
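# Illustrative example (values chosen for demonstration): a TeX 'ff' ligature
# and a broken umlaut sequence are both normalised by the replacements above:
#   replace_undesirable_characters(u'e\ufb00ective M\u00a8 uller')
#   -> u'effective M\u00fcller'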
def pdftotext_conversion_is_bad(txtlines):
"""Sometimes pdftotext performs a bad conversion which consists of many
spaces and garbage characters.
This method takes a list of strings obtained from a pdftotext conversion
and examines them to see if they are likely to be the result of a bad
conversion.
@param txtlines: (list) of unicode strings obtained from pdftotext
conversion.
@return: (bool) - True if the conversion looks bad; False if it looks good.
"""
# Numbers of 'words' and 'whitespaces' found in document:
numWords = numSpaces = 0
# whitespace character pattern:
p_space = re.compile(unicode(r'(\s)'), re.UNICODE)
# non-whitespace 'word' pattern:
p_noSpace = re.compile(unicode(r'(\S+)'), re.UNICODE)
for txtline in txtlines:
numWords = numWords + len(p_noSpace.findall(txtline.strip()))
numSpaces = numSpaces + len(p_space.findall(txtline.strip()))
if numSpaces >= (numWords * 3):
# Too many spaces - probably bad conversion
return True
else:
return False
def convert_PDF_to_plaintext(fpath, keep_layout=False):
""" Convert PDF to txt using pdftotext
Take the path to a PDF file and run pdftotext for this file, capturing
the output.
@param fpath: (string) path to the PDF file
@return: (list) of unicode strings (contents of the PDF file translated
into plaintext; each string is a line in the document.)
"""
if keep_layout:
layout_option = "-layout"
else:
layout_option = "-raw"
status = 0
doclines = []
# Pattern to check for lines with a leading page-break character.
# If this pattern is matched, we want to split the page-break into
# its own line because we rely upon this for trying to strip headers
# and footers, and for some other pattern matching.
p_break_in_line = re.compile(ur'^\s*\f(.+)$', re.UNICODE)
# build pdftotext command:
cmd_pdftotext = [CFG_PATH_PDFTOTEXT, layout_option, "-q",
"-enc", "UTF-8", fpath, "-"]
write_message("* %s" % ' '.join(cmd_pdftotext), verbose=2)
# open pipe to pdftotext:
pipe_pdftotext = subprocess.Popen(cmd_pdftotext, stdout=subprocess.PIPE)
# read back results:
for docline in pipe_pdftotext.stdout:
unicodeline = docline.decode("utf-8")
# Check for a page-break in this line:
m_break_in_line = p_break_in_line.match(unicodeline)
if m_break_in_line is None:
# There was no page-break in this line. Just add the line:
doclines.append(unicodeline)
else:
# If there was a page-break character in the same line as some
# text, split it out into its own line so that we can later
# try to find headers and footers:
doclines.append(u"\f")
doclines.append(m_break_in_line.group(1))
write_message("* convert_PDF_to_plaintext found: " \
"%s lines of text" % len(doclines), verbose=2)
# finally, check conversion result not bad:
if pdftotext_conversion_is_bad(doclines):
status = 2
doclines = []
return (doclines, status)
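# Illustrative call (the path is hypothetical; requires pdftotext to be
# installed at CFG_PATH_PDFTOTEXT):
#   doclines, status = convert_PDF_to_plaintext('/tmp/paper.pdf', keep_layout=True)
#   # status == 0 on success, status == 2 when the conversion looks bad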
| PXke/invenio | invenio/legacy/docextract/pdf.py | Python | gpl-2.0 | 16,893 |
"""
Base implementation for services available through a provider
"""
from cloudbridge.cloud.interfaces.resources import Router
from cloudbridge.cloud.interfaces.services import BlockStoreService
from cloudbridge.cloud.interfaces.services import CloudService
from cloudbridge.cloud.interfaces.services import ComputeService
from cloudbridge.cloud.interfaces.services import GatewayService
from cloudbridge.cloud.interfaces.services import ImageService
from cloudbridge.cloud.interfaces.services import InstanceService
from cloudbridge.cloud.interfaces.services import InstanceTypesService
from cloudbridge.cloud.interfaces.services import KeyPairService
from cloudbridge.cloud.interfaces.services import NetworkService
from cloudbridge.cloud.interfaces.services import NetworkingService
from cloudbridge.cloud.interfaces.services import ObjectStoreService
from cloudbridge.cloud.interfaces.services import RegionService
from cloudbridge.cloud.interfaces.services import RouterService
from cloudbridge.cloud.interfaces.services import SecurityGroupService
from cloudbridge.cloud.interfaces.services import SecurityService
from cloudbridge.cloud.interfaces.services import SnapshotService
from cloudbridge.cloud.interfaces.services import SubnetService
from cloudbridge.cloud.interfaces.services import VolumeService
from .resources import BasePageableObjectMixin
class BaseCloudService(CloudService):
def __init__(self, provider):
self._provider = provider
@property
def provider(self):
return self._provider
class BaseComputeService(ComputeService, BaseCloudService):
def __init__(self, provider):
super(BaseComputeService, self).__init__(provider)
class BaseVolumeService(
BasePageableObjectMixin, VolumeService, BaseCloudService):
def __init__(self, provider):
super(BaseVolumeService, self).__init__(provider)
class BaseSnapshotService(
BasePageableObjectMixin, SnapshotService, BaseCloudService):
def __init__(self, provider):
super(BaseSnapshotService, self).__init__(provider)
class BaseBlockStoreService(BlockStoreService, BaseCloudService):
def __init__(self, provider):
super(BaseBlockStoreService, self).__init__(provider)
class BaseImageService(
BasePageableObjectMixin, ImageService, BaseCloudService):
def __init__(self, provider):
super(BaseImageService, self).__init__(provider)
class BaseObjectStoreService(
BasePageableObjectMixin, ObjectStoreService, BaseCloudService):
def __init__(self, provider):
super(BaseObjectStoreService, self).__init__(provider)
class BaseSecurityService(SecurityService, BaseCloudService):
def __init__(self, provider):
super(BaseSecurityService, self).__init__(provider)
class BaseKeyPairService(
BasePageableObjectMixin, KeyPairService, BaseCloudService):
def __init__(self, provider):
super(BaseKeyPairService, self).__init__(provider)
def delete(self, key_pair_id):
"""
Delete an existing key pair.
:type key_pair_id: str
:param key_pair_id: The id of the key pair to be deleted.
:rtype: ``bool``
:return: ``True`` if the key does not exist. Note that this implies
that the key may not have been deleted by this method but
instead has not existed in the first place.
"""
kp = self.get(key_pair_id)
if kp:
kp.delete()
return True
class BaseSecurityGroupService(
BasePageableObjectMixin, SecurityGroupService, BaseCloudService):
def __init__(self, provider):
super(BaseSecurityGroupService, self).__init__(provider)
class BaseInstanceTypesService(
BasePageableObjectMixin, InstanceTypesService, BaseCloudService):
def __init__(self, provider):
super(BaseInstanceTypesService, self).__init__(provider)
def get(self, instance_type_id):
itype = (t for t in self if t.id == instance_type_id)
return next(itype, None)
def find(self, **kwargs):
name = kwargs.get('name')
if name:
return [itype for itype in self if itype.name == name]
else:
raise TypeError(
"Invalid parameters for search. Supported attributes: {name}")
class BaseInstanceService(
BasePageableObjectMixin, InstanceService, BaseCloudService):
def __init__(self, provider):
super(BaseInstanceService, self).__init__(provider)
class BaseRegionService(
BasePageableObjectMixin, RegionService, BaseCloudService):
def __init__(self, provider):
super(BaseRegionService, self).__init__(provider)
def find(self, name):
return [region for region in self if region.name == name]
class BaseNetworkingService(NetworkingService, BaseCloudService):
def __init__(self, provider):
super(BaseNetworkingService, self).__init__(provider)
class BaseNetworkService(
BasePageableObjectMixin, NetworkService, BaseCloudService):
def __init__(self, provider):
super(BaseNetworkService, self).__init__(provider)
@property
def subnets(self):
return [subnet for subnet in self.provider.subnets
if subnet.network_id == self.id]
def delete(self, network_id):
network = self.get(network_id)
if network:
network.delete()
return True
class BaseSubnetService(
BasePageableObjectMixin, SubnetService, BaseCloudService):
def __init__(self, provider):
super(BaseSubnetService, self).__init__(provider)
def find(self, **kwargs):
name = kwargs.get('name')
if name:
return [subnet for subnet in self if subnet.name == name]
else:
raise TypeError(
"Invalid parameters for search. Supported attributes: {name}")
class BaseRouterService(
BasePageableObjectMixin, RouterService, BaseCloudService):
def __init__(self, provider):
super(BaseRouterService, self).__init__(provider)
def delete(self, router):
if isinstance(router, Router):
router.delete()
else:
router = self.get(router)
if router:
router.delete()
class BaseGatewayService(
GatewayService, BaseCloudService):
def __init__(self, provider):
super(BaseGatewayService, self).__init__(provider)
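# Illustrative sketch (not part of the original module): provider-specific
# services are expected to subclass these base classes, pass the provider
# through to the base __init__, and inherit the shared behaviour (for example
# BaseKeyPairService.delete). The names below are invented for illustration:
#
#     class MyCloudKeyPairService(BaseKeyPairService):
#
#         def __init__(self, provider):
#             super(MyCloudKeyPairService, self).__init__(provider)
#
#         def get(self, key_pair_id):
#             # Look the key pair up via a hypothetical provider SDK handle.
#             raw = self.provider.sdk.find_key_pair(key_pair_id)
#             return MyCloudKeyPair(self.provider, raw) if raw else None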
| ms-azure-cloudbroker/cloudbridge | cloudbridge/cloud/base/services.py | Python | mit | 6,452 |
from .data import contract_template, company_name_column
from .fetch import get_sponsor, get_sponsors_ws_data
from .contract import create_sponsor_agreement
| EuroPython/ep-tools | eptools/sponsors/__init__.py | Python | mit | 158 |
# Copyright 2016-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from collections import defaultdict
from itertools import chain
from devlib.utils.misc import memoized
from wa.framework import pluginloader
from wa.framework.configuration.core import get_config_point_map
from wa.framework.exception import ConfigError, NotFoundError
from wa.framework.target.descriptor import list_target_descriptions
from wa.utils.types import obj_dict, caseless_string
GENERIC_CONFIGS = ["device_config", "workload_parameters",
"boot_parameters", "runtime_parameters"]
class PluginCache(object):
"""
The plugin cache is used to store configuration that cannot be processed at
this stage, whether thats because it is unknown if its needed
(in the case of disabled plug-ins) or it is not know what it belongs to (in
the case of "device-config" ect.). It also maintains where configuration came
from, and the priority order of said sources.
"""
def __init__(self, loader=pluginloader):
self.loader = loader
self.sources = []
self.plugin_configs = defaultdict(lambda: defaultdict(dict))
self.global_alias_values = defaultdict(dict)
self.targets = {td.name: td for td in list_target_descriptions()}
# Generate a mapping of what global aliases belong to
self._global_alias_map = defaultdict(dict)
self._list_of_global_aliases = set()
for plugin in self.loader.list_plugins():
for param in plugin.parameters:
if param.global_alias:
self._global_alias_map[plugin.name][param.global_alias] = param
self._list_of_global_aliases.add(param.global_alias)
def add_source(self, source):
if source in self.sources:
msg = "Source '{}' has already been added."
raise Exception(msg.format(source))
self.sources.append(source)
def add_global_alias(self, alias, value, source):
if source not in self.sources:
msg = "Source '{}' has not been added to the plugin cache."
raise RuntimeError(msg.format(source))
if not self.is_global_alias(alias):
msg = "'{} is not a valid global alias'"
raise RuntimeError(msg.format(alias))
self.global_alias_values[alias][source] = value
def add_configs(self, plugin_name, values, source):
if self.is_global_alias(plugin_name):
self.add_global_alias(plugin_name, values, source)
return
if source not in self.sources:
msg = "Source '{}' has not been added to the plugin cache."
raise RuntimeError(msg.format(source))
if caseless_string(plugin_name) in ['global', 'config']:
msg = '"{}" entry specified inside config/global section; If this is ' \
'defined in a config file, move the entry content into the top level'
raise ConfigError(msg.format((plugin_name)))
if (not self.loader.has_plugin(plugin_name) and
plugin_name not in self.targets and
plugin_name not in GENERIC_CONFIGS):
msg = 'configuration provided for unknown plugin "{}"'
raise ConfigError(msg.format(plugin_name))
if not hasattr(values, 'items'):
msg = 'Plugin configuration for "{}" not a dictionary ({} is {})'
raise ConfigError(msg.format(plugin_name, repr(values), type(values)))
for name, value in values.items():
if (plugin_name not in GENERIC_CONFIGS and
name not in self.get_plugin_parameters(plugin_name)):
msg = "'{}' is not a valid parameter for '{}'"
raise ConfigError(msg.format(name, plugin_name))
self.plugin_configs[plugin_name][source][name] = value
def is_global_alias(self, name):
return name in self._list_of_global_aliases
def list_plugins(self, kind=None):
return self.loader.list_plugins(kind)
def get_plugin_config(self, plugin_name, generic_name=None, is_final=True):
config = obj_dict(not_in_dict=['name'])
config.name = plugin_name
if plugin_name not in GENERIC_CONFIGS:
self._set_plugin_defaults(plugin_name, config)
self._set_from_global_aliases(plugin_name, config)
if generic_name is None:
# Perform a simple merge with the order of sources representing
# priority
plugin_config = self.plugin_configs[plugin_name]
cfg_points = self.get_plugin_parameters(plugin_name)
for source in self.sources:
if source not in plugin_config:
continue
for name, value in plugin_config[source].items():
cfg_points[name].set_value(config, value=value)
else:
# A more complicated merge that involves priority of sources and
# specificity
self._merge_using_priority_specificity(plugin_name, generic_name,
config, is_final)
return config
def get_plugin(self, name, kind=None, *args, **kwargs):
config = self.get_plugin_config(name)
kwargs = dict(list(config.items()) + list(kwargs.items()))
return self.loader.get_plugin(name, kind=kind, *args, **kwargs)
def get_plugin_class(self, name, kind=None):
return self.loader.get_plugin_class(name, kind)
@memoized
def get_plugin_parameters(self, name):
if name in self.targets:
return self._get_target_params(name)
params = self.loader.get_plugin_class(name).parameters
return get_config_point_map(params)
def resolve_alias(self, name):
return self.loader.resolve_alias(name)
def _set_plugin_defaults(self, plugin_name, config):
cfg_points = self.get_plugin_parameters(plugin_name)
for cfg_point in cfg_points.values():
cfg_point.set_value(config, check_mandatory=False)
try:
_, alias_params = self.resolve_alias(plugin_name)
for name, value in alias_params.items():
cfg_points[name].set_value(config, value)
except NotFoundError:
pass
def _set_from_global_aliases(self, plugin_name, config):
for alias, param in self._global_alias_map[plugin_name].items():
if alias in self.global_alias_values:
for source in self.sources:
if source not in self.global_alias_values[alias]:
continue
val = self.global_alias_values[alias][source]
param.set_value(config, value=val)
def _get_target_params(self, name):
td = self.targets[name]
return get_config_point_map(chain(td.target_params, td.platform_params, td.conn_params, td.assistant_params))
# pylint: disable=too-many-nested-blocks, too-many-branches
def _merge_using_priority_specificity(self, specific_name,
generic_name, merged_config, is_final=True):
"""
WA configuration can come from various sources of increasing priority,
as well as being specified in a generic and specific manner (e.g
``device_config`` and ``nexus10`` respectivly). WA has two rules for
the priority of configuration:
- Configuration from higher priority sources overrides
configuration from lower priority sources.
- More specific configuration overrides less specific configuration.
There is a situation where these two rules come into conflict. When a
generic configuration is given in config source of high priority and a
specific configuration is given in a config source of lower priority.
In this situation it is not possible to know the end users intention
and WA will error.
:param specific_name: The name of the specific configuration used
e.g ``nexus10``
:param generic_name: The name of the generic configuration
e.g ``device_config``
:param merge_config: A dict of ``ConfigurationPoint``s to be used when
merging configuration. keys=config point name,
values=config point
:param is_final: if ``True`` (the default) make sure that mandatory
parameters are set.
:rtype: A fully merged and validated configuration in the form of a
obj_dict.
"""
ms = MergeState()
ms.generic_name = generic_name
ms.specific_name = specific_name
ms.generic_config = copy(self.plugin_configs[generic_name])
ms.specific_config = copy(self.plugin_configs[specific_name])
ms.cfg_points = self.get_plugin_parameters(specific_name)
sources = self.sources
        # set_value uses the 'name' attribute of the passed object in its error
        # messages; to ensure those messages make sense, the name will have to be
        # changed several times during this function.
merged_config.name = specific_name
for source in sources:
try:
update_config_from_source(merged_config, source, ms)
except ConfigError as e:
raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
# Validate final configuration
merged_config.name = specific_name
for cfg_point in ms.cfg_points.values():
cfg_point.validate(merged_config, check_mandatory=is_final)
def __getattr__(self, name):
"""
        This resolves methods for specific plugin types based on corresponding
generic plugin methods. So it's possible to say things like ::
loader.get_device('foo')
instead of ::
loader.get_plugin('foo', kind='device')
"""
error_msg = 'No plugins of type "{}" discovered'
if name.startswith('get_'):
name = name.replace('get_', '', 1)
if name in self.loader.kind_map:
def __wrapper(pname, *args, **kwargs):
return self.get_plugin(pname, name, *args, **kwargs)
return __wrapper
raise NotFoundError(error_msg.format(name))
if name.startswith('list_'):
name = name.replace('list_', '', 1).rstrip('s')
if name in self.loader.kind_map:
def __wrapper(*args, **kwargs): # pylint: disable=E0102
return self.list_plugins(name, *args, **kwargs)
return __wrapper
raise NotFoundError(error_msg.format(name))
if name.startswith('has_'):
name = name.replace('has_', '', 1)
if name in self.loader.kind_map:
def __wrapper(pname, *args, **kwargs): # pylint: disable=E0102
return self.loader.has_plugin(pname, name, *args, **kwargs)
return __wrapper
raise NotFoundError(error_msg.format(name))
raise AttributeError(name)
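# Illustrative note (added, not part of the original source): for any plugin
# kind known to the loader, __getattr__ above turns calls like the following
# (plugin names are hypothetical) into the generic methods:
#
#     cache = PluginCache()
#     cache.get_workload('dhrystone')    # -> cache.get_plugin('dhrystone', 'workload')
#     cache.has_instrument('trace-cmd')  # -> loader.has_plugin('trace-cmd', 'instrument')
#     cache.list_workloads()             # -> cache.list_plugins('workload')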
class MergeState(object):
def __init__(self):
self.generic_name = None
self.specific_name = None
self.generic_config = None
self.specific_config = None
self.cfg_points = None
self.seen_specific_config = defaultdict(list)
def update_config_from_source(final_config, source, state):
if source in state.generic_config:
final_config.name = state.generic_name
for name, cfg_point in state.cfg_points.items():
if name in state.generic_config[source]:
if name in state.seen_specific_config:
msg = ('"{generic_name}" configuration "{config_name}" has '
'already been specified more specifically for '
'{specific_name} in:\n\t\t{sources}')
seen_sources = state.seen_specific_config[name]
msg = msg.format(generic_name=state.generic_name,
config_name=name,
specific_name=state.specific_name,
sources=", ".join(seen_sources))
raise ConfigError(msg)
value = state.generic_config[source].pop(name)
cfg_point.set_value(final_config, value, check_mandatory=False)
if state.generic_config[source]:
msg = 'Unexpected values for {}: {}'
raise ConfigError(msg.format(state.generic_name,
state.generic_config[source]))
if source in state.specific_config:
final_config.name = state.specific_name
for name, cfg_point in state.cfg_points.items():
if name in state.specific_config[source]:
state.seen_specific_config[name].append(str(source))
value = state.specific_config[source].pop(name)
cfg_point.set_value(final_config, value, check_mandatory=False)
if state.specific_config[source]:
msg = 'Unexpected values for {}: {}'
raise ConfigError(msg.format(state.specific_name,
state.specific_config[source]))
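# Illustrative example (added note, not part of the original module) of the two
# rules implemented by update_config_from_source(), using hypothetical sources
# and values:
#
#   lower-priority source:   nexus10:       {max_freq: 1800000}   (specific)
#   higher-priority source:  device_config: {max_freq: 2000000}   (generic)
#
# The generic entry in the higher-priority source would normally win, but the
# same parameter was already given more specifically in a lower-priority
# source, so the user's intent is ambiguous and a ConfigError is raised. If
# only one of the two entries were present, its value would simply be set on
# the merged configuration.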
| lisatn/workload-automation | wa/framework/configuration/plugin_cache.py | Python | apache-2.0 | 13,957 |
# This file is part of Gajim.
#
# Gajim is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; version 3 only.
#
# Gajim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gajim. If not, see <http://www.gnu.org/licenses/>.
# XEP-0070: Verifying HTTP Requests via XMPP
import nbxmpp
from nbxmpp.structs import StanzaHandler
from nbxmpp.namespaces import Namespace
from gajim.common import app
from gajim.common.events import HttpAuth
from gajim.common.modules.base import BaseModule
class HTTPAuth(BaseModule):
def __init__(self, con):
BaseModule.__init__(self, con)
self.handlers = [
StanzaHandler(name='message',
callback=self._http_auth,
ns=Namespace.HTTP_AUTH,
priority=45),
StanzaHandler(name='iq',
callback=self._http_auth,
typ='get',
ns=Namespace.HTTP_AUTH,
priority=45)
]
def _http_auth(self, _con, stanza, properties):
if not properties.is_http_auth:
return
self._log.info('Auth request received')
auto_answer = app.settings.get_account_setting(self._account,
'http_auth')
if auto_answer in ('yes', 'no'):
self.build_http_auth_answer(stanza, auto_answer)
raise nbxmpp.NodeProcessed
app.ged.raise_event(
HttpAuth(client=self._con,
data=properties.http_auth,
stanza=stanza))
raise nbxmpp.NodeProcessed
def build_http_auth_answer(self, stanza, answer):
if answer == 'yes':
self._log.info('Auth request approved')
confirm = stanza.getTag('confirm')
reply = stanza.buildReply('result')
if stanza.getName() == 'message':
reply.addChild(node=confirm)
self._con.connection.send(reply)
elif answer == 'no':
self._log.info('Auth request denied')
err = nbxmpp.Error(stanza, nbxmpp.protocol.ERR_NOT_AUTHORIZED)
self._con.connection.send(err)
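# Illustrative flow (added note, not part of the original module): the
# per-account 'http_auth' setting decides how an incoming XEP-0070 request is
# handled.
#
#   'yes' or 'no'  -> answered immediately via build_http_auth_answer()
#   anything else  -> an HttpAuth event is raised so the UI can ask the user
#                     and later answer it itself, conceptually:
#
#     module.build_http_auth_answer(event.stanza, 'yes')   # or 'no'
#
# where 'module' stands for this HTTPAuth instance; how the UI obtains it is
# outside this file.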
| gajim/gajim | gajim/common/modules/http_auth.py | Python | gpl-3.0 | 2,575 |
# -*- coding: utf-8 -*-
"""
Django settings for To-Do-List project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import environ
import os
ROOT_DIR = environ.Path(__file__) - 3 # (to_do_list/config/settings/common.py - 3 = to_do_list/)
APPS_DIR = ROOT_DIR.path('to_do_list')
env = environ.Env()
env.read_env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
# custom users app
'to_do_list.users.apps.UsersConfig',
# Your stuff: custom apps go here
'django_tables2',
'django_filters',
'captcha',
'to_do_list.tasks',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'to_do_list.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Arnaud Blois""", '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# -----------------------------------------------------------------------------
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
# Setting up a TRAVIS Database settings
# more info at https://gist.github.com/ndarville/3625246 and
# http://www.lesinskis.com/travis_ci_django.html
if 'BUILD_ON_TRAVIS' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'travisci',
'USER': 'postgres',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
else:
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///origin'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
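# The default database connection can also be supplied via the DATABASE_URL
# environment variable (parsed by django-environ's env.db() above), e.g. in a
# .env file (illustrative values only):
#   DATABASE_URL=postgres://myuser:mypassword@localhost:5432/to_do_list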
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# PASSWORD VALIDATION
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'to_do_list.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'to_do_list.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'tasks:home'
LOGIN_URL = 'account_login'
ACCOUNT_SIGNUP_FORM_CLASS = 'to_do_list.users.forms.AllauthSignupForm'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
# Default keys given here instruct Google to use the test reCAPTCHA, which always validates
RECAPTCHA_PUBLIC_KEY = env('RECAPTCHA_PUBLIC_KEY', default='6LeIxAcTAAAAAJcZVRqyHh71UMIEGNQ_MXjiZKhI')
RECAPTCHA_PRIVATE_KEY = env('RECAPTCHA_PRIVATE_KEY', default='6LeIxAcTAAAAAGG-vFI1TnRWxMZNFuojJ4WifJWe')
NOCAPTCHA = True
| arnaudblois/to_do_list | config/settings/common.py | Python | mit | 10,018 |
#!/usr/bin/env python
# 2015 Copyright (C) White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from flask import render_template
from white.controller import admin_bp as bp, ADMIN, EDITOR
from white.security import security
@bp.route('/extend')
@security(ADMIN)
def extend_index():
return render_template('admin/extend/index.html')
@bp.route('/extend/variable')
@security(ADMIN)
def variable_index():
return render_template('admin/extend/variable/index.html')
@bp.route('/extend/variable/add')
@security(ADMIN)
def variable_add_page():
return render_template('admin/extend/variable/add.html')
@bp.route('/extend/plugin')
@security(ADMIN)
def extend_plugin():
    return render_template('admin/extend/plugin/index.html')
| thomashuang/white | white/controller/admin/extend.py | Python | gpl-2.0 | 1282
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/control.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/api/control.proto",
package="google.api",
syntax="proto3",
serialized_options=b"\n\016com.google.apiB\014ControlProtoP\001ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\242\002\004GAPI",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x18google/api/control.proto\x12\ngoogle.api"\x1e\n\x07\x43ontrol\x12\x13\n\x0b\x65nvironment\x18\x01 \x01(\tBn\n\x0e\x63om.google.apiB\x0c\x43ontrolProtoP\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\xa2\x02\x04GAPIb\x06proto3',
)
_CONTROL = _descriptor.Descriptor(
name="Control",
full_name="google.api.Control",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="environment",
full_name="google.api.Control.environment",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=40,
serialized_end=70,
)
DESCRIPTOR.message_types_by_name["Control"] = _CONTROL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Control = _reflection.GeneratedProtocolMessageType(
"Control",
(_message.Message,),
{
"DESCRIPTOR": _CONTROL,
"__module__": "google.api.control_pb2"
# @@protoc_insertion_point(class_scope:google.api.Control)
},
)
_sym_db.RegisterMessage(Control)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| martbhell/wasthereannhlgamelastnight | src/lib/google/api/control_pb2.py | Python | mit | 3,161 |
#
# joeecc - A small Elliptic Curve Cryptography Demonstration.
# Copyright (C) 2011-2016 Johannes Bauer
#
# This file is part of joeecc.
#
# joeecc is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; this program is ONLY licensed under
# version 3 of the License, later versions are explicitly excluded.
#
# joeecc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with joeecc; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Johannes Bauer <[email protected]>
#
import hashlib
import base64
import inspect
def bytestoint_le(data):
"""Converts given bytes to a little-endian integer value."""
return sum(value << (8 * index) for (index, value) in enumerate(data))
def inttobytes_le(value, length):
"""Converts a little-endian integer value into a bytes object."""
return bytes((value >> (8 * i)) & 0xff for i in range(length))
def bytestoint(data):
"""Converts given bytes to a big-endian integer value."""
return bytestoint_le(reversed(data))
def inttobytes(value, length):
"""Converts a big-endian integer value into a bytes object."""
return bytes((value >> (8 * i)) & 0xff for i in reversed(range(length)))
def bits_to_bytes(bitarray):
"""Converts a tuple of bits (e.g. a ASN.1 BitString) to a bytes object.
Only works when number of bits is a multiple of 8."""
def bit_word_to_value(word):
assert(len(word) == 8)
return sum(value << i for (i, value) in enumerate(reversed(word)))
assert((len(bitarray) % 8) == 0)
return bytes(bit_word_to_value(bitarray[i : i + 8]) for i in range(0, len(bitarray), 8))
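# Worked example (added comment, not in the original source): the first bit of
# each 8-bit word is treated as the most significant bit, so
#     bits_to_bytes((0, 1, 0, 0, 0, 0, 0, 1)) == b"A"    # 0b01000001 == 0x41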
def ecdsa_msgdigest_to_int(message_digest, curveorder):
"""Performs truncation of a message digest to the bitlength of the curve
order."""
# Convert message digest to integer value
e = bytestoint(message_digest)
# Truncate hash value if necessary
msg_digest_bits = 8 * len(message_digest)
if msg_digest_bits > curveorder.bit_length():
shift = msg_digest_bits - curveorder.bit_length()
e >>= shift
return e
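# Worked example (added comment, not in the original source): for a SHA-512
# message digest (512 bits) and a curve whose order has a bit length of 256,
# the digest's integer value is shifted right by 512 - 256 = 256 bits, i.e.
# only the leftmost 256 bits of the digest are used, matching the usual ECDSA
# truncation rule.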
def eddsa_hash(data):
"""Returns the message digest over the data which is used for EdDSA
(SHA-512)."""
return hashlib.sha512(data).digest()
def load_pem_data(filename, specifier):
"""Loads the PEM payload, designated with a BEGIN and END specifier, from a
file given by its filename."""
data = None
with open(filename, "r") as f:
spec_begin = "-----BEGIN " + specifier + "-----"
spec_end = "-----END " + specifier + "-----"
for line in f:
line = line.rstrip()
if (data is None) and (line == spec_begin):
data = [ ]
elif (data is not None) and (line == spec_end):
break
elif data is not None:
data.append(line)
if data is None:
raise Exception("Trying to parse PEM file with specifier '%s', but no such block in file found." % (specifier))
data = base64.b64decode("".join(data).encode("utf-8"))
return data
def is_power_of_two(value):
"""Returns True if the given value is a positive power of two, False
otherwise."""
while value > 0:
if value == 1:
return True
elif (value & 1) == 1:
return False
value >>= 1
return False
#def inheritdocstring(cls):
# for base in inspect.getmro(cls):
# if base.__doc__ is not None:
# cls.__doc__ = base.__doc__
# break
# return cls
| johndoe31415/joeecc | ecc/Tools.py | Python | gpl-3.0 | 3,622 |
#!/usr/bin/python
#
# Copyright (C) 2007 SIOS Technology, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test data for ds"""
__author__ = '[email protected] (Takashi MATSUO)'
TEST_OBJECT = """<?xml version="1.0" encoding="utf-8"?>
<Object Id="object_id" Encoding="http://www.w3.org/2000/09/xmldsig#base64"
xmlns="http://www.w3.org/2000/09/xmldsig#">
V2VkIEp1biAgNCAxMjoxMTowMyBFRFQgMjAwMwo
</Object>
"""
TEST_MGMT_DATA = """<?xml version="1.0" encoding="utf-8"?>
<MgmtData xmlns="http://www.w3.org/2000/09/xmldsig#">
mgmt data
</MgmtData>
"""
TEST_SPKI_SEXP = """<?xml version="1.0" encoding="utf-8"?>
<SPKISexp xmlns="http://www.w3.org/2000/09/xmldsig#">
spki sexp
</SPKISexp>
"""
TEST_SPKI_DATA = """<?xml version="1.0" encoding="utf-8"?>
<SPKIData xmlns="http://www.w3.org/2000/09/xmldsig#">
<SPKISexp>spki sexp</SPKISexp>
<SPKISexp>spki sexp2</SPKISexp>
</SPKIData>
"""
TEST_PGP_DATA = """<?xml version="1.0" encoding="utf-8"?>
<PGPData xmlns="http://www.w3.org/2000/09/xmldsig#">
<PGPKeyID>pgp key id</PGPKeyID>
<PGPKeyPacket>pgp key packet</PGPKeyPacket>
</PGPData>
"""
TEST_X509_ISSUER_SERIAL = """<?xml version="1.0" encoding="utf-8"?>
<X509IssuerSerial xmlns="http://www.w3.org/2000/09/xmldsig#">
<X509IssuerName>issuer name</X509IssuerName>
<X509IssuerNumber>1</X509IssuerNumber>
</X509IssuerSerial>
"""
TEST_X509_DATA = """<?xml version="1.0" encoding="utf-8"?>
<X509Data xmlns="http://www.w3.org/2000/09/xmldsig#">
<X509IssuerSerial>
<X509IssuerName>issuer name</X509IssuerName>
<X509IssuerNumber>1</X509IssuerNumber>
</X509IssuerSerial>
<X509SKI>x509 ski</X509SKI>
<X509SubjectName>x509 subject name</X509SubjectName>
<X509Certificate>x509 certificate</X509Certificate>
<X509CRL>x509 crl</X509CRL>
</X509Data>
"""
TEST_TRANSFORM = """<?xml version="1.0" encoding="utf-8"?>
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"
xmlns="http://www.w3.org/2000/09/xmldsig#">
<XPath>xpath</XPath>
</Transform>
"""
TEST_TRANSFORMS = """<?xml version="1.0" encoding="utf-8"?>
<Transforms xmlns="http://www.w3.org/2000/09/xmldsig#">
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature">
<XPath>xpath</XPath>
</Transform>
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature">
<XPath>xpath</XPath>
</Transform>
</Transforms>
"""
TEST_RETRIEVAL_METHOD = """<?xml version="1.0" encoding="utf-8"?>
<RetrievalMethod xmlns="http://www.w3.org/2000/09/xmldsig#"
URI="http://www.sios.com/URI"
Type="http://www.sios.com/Type">
<Transforms>
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature">
<XPath>xpath</XPath>
</Transform>
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature">
<XPath>xpath</XPath>
</Transform>
</Transforms>
</RetrievalMethod>
"""
TEST_RSA_KEY_VALUE = """<?xml version="1.0" encoding="utf-8"?>
<RSAKeyValue xmlns="http://www.w3.org/2000/09/xmldsig#">
<Modulus>modulus</Modulus>
<Exponent>exponent</Exponent>
</RSAKeyValue>
"""
TEST_DSA_KEY_VALUE = """<?xml version="1.0" encoding="utf-8"?>
<DSAKeyValue xmlns="http://www.w3.org/2000/09/xmldsig#">
<P>p</P>
<Q>q</Q>
<G>g</G>
<Y>y</Y>
<J>j</J>
<Seed>seed</Seed>
<PgenCounter>pgen counter</PgenCounter>
</DSAKeyValue>
"""
TEST_KEY_VALUE1 = """<?xml version="1.0" encoding="utf-8"?>
<KeyValue xmlns="http://www.w3.org/2000/09/xmldsig#">
<DSAKeyValue>
<P>p</P>
<Q>q</Q>
<G>g</G>
<Y>y</Y>
<J>j</J>
<Seed>seed</Seed>
<PgenCounter>pgen counter</PgenCounter>
</DSAKeyValue>
</KeyValue>
"""
TEST_KEY_VALUE2 = """<?xml version="1.0" encoding="utf-8"?>
<KeyValue xmlns="http://www.w3.org/2000/09/xmldsig#">
<RSAKeyValue xmlns="http://www.w3.org/2000/09/xmldsig#">
<Modulus>modulus</Modulus>
<Exponent>exponent</Exponent>
</RSAKeyValue>
</KeyValue>
"""
TEST_KEY_NAME = """<?xml version="1.0" encoding="utf-8"?>
<KeyName xmlns="http://www.w3.org/2000/09/xmldsig#">
key name
</KeyName>
"""
TEST_KEY_INFO = """<?xml version="1.0" encoding="utf-8"?>
<KeyInfo xmlns="http://www.w3.org/2000/09/xmldsig#" Id="id">
<KeyName>
key name
</KeyName>
<KeyValue>
<DSAKeyValue>
<P>p</P>
<Q>q</Q>
<G>g</G>
<Y>y</Y>
<J>j</J>
<Seed>seed</Seed>
<PgenCounter>pgen counter</PgenCounter>
</DSAKeyValue>
</KeyValue>
<RetrievalMethod URI="http://www.sios.com/URI"
Type="http://www.sios.com/Type">
<Transforms>
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature">
<XPath>xpath</XPath>
</Transform>
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature">
<XPath>xpath</XPath>
</Transform>
</Transforms>
</RetrievalMethod>
<X509Data>
<X509IssuerSerial>
<X509IssuerName>issuer name</X509IssuerName>
<X509IssuerNumber>1</X509IssuerNumber>
</X509IssuerSerial>
<X509SKI>x509 ski</X509SKI>
<X509SubjectName>x509 subject name</X509SubjectName>
<X509Certificate>x509 certificate</X509Certificate>
<X509CRL>x509 crl</X509CRL>
</X509Data>
<PGPData>
<PGPKeyID>pgp key id</PGPKeyID>
<PGPKeyPacket>pgp key packet</PGPKeyPacket>
</PGPData>
<MgmtData>
mgmt data
</MgmtData>
<SPKIData>
<SPKISexp>spki sexp</SPKISexp>
<SPKISexp>spki sexp2</SPKISexp>
</SPKIData>
</KeyInfo>
"""
TEST_DIGEST_VALUE = """<?xml version="1.0" encoding="utf-8"?>
<DigestValue xmlns="http://www.w3.org/2000/09/xmldsig#">
digest value
</DigestValue>
"""
TEST_DIGEST_METHOD = """<?xml version="1.0" encoding="utf-8"?>
<DigestMethod xmlns="http://www.w3.org/2000/09/xmldsig#"
Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
"""
TEST_REFERENCE = """<?xml version="1.0" encoding="utf-8"?>
<Reference xmlns="http://www.w3.org/2000/09/xmldsig#" Id="id"
URI="http://www.sios.com/URI"
Type="http://www.sios.com/Type">
<Transforms>
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature">
<XPath>xpath</XPath>
</Transform>
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature">
<XPath>xpath</XPath>
</Transform>
</Transforms>
<DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
<DigestValue>digest value</DigestValue>
</Reference>
"""
TEST_SIGNATURE_METHOD = """<?xml version="1.0" encoding="utf-8"?>
<SignatureMethod xmlns="http://www.w3.org/2000/09/xmldsig#"
Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1">
<HMACOutputLength>8</HMACOutputLength>
</SignatureMethod>
"""
TEST_CANONICALIZATION_METHOD = """<?xml version="1.0" encoding="utf-8"?>
<CanonicalizationMethod xmlns="http://www.w3.org/2000/09/xmldsig#"
Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments">
</CanonicalizationMethod>
"""
TEST_SIGNED_INFO = """<?xml version="1.0" encoding="utf-8"?>
<SignedInfo xmlns="http://www.w3.org/2000/09/xmldsig#" Id="id">
<CanonicalizationMethod
Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments">
</CanonicalizationMethod>
<SignatureMethod
Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1">
<HMACOutputLength>8</HMACOutputLength>
</SignatureMethod>
<Reference Id="id" URI="http://www.sios.com/URI"
Type="http://www.sios.com/Type">
<Transforms>
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature">
<XPath>xpath</XPath>
</Transform>
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature">
<XPath>xpath</XPath>
</Transform>
</Transforms>
<DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
<DigestValue>digest value</DigestValue>
</Reference>
</SignedInfo>
"""
TEST_SIGNATURE_VALUE = """<?xml version="1.0" encoding="utf-8"?>
<SignatureValue xmlns="http://www.w3.org/2000/09/xmldsig#" Id="id">
signature value
</SignatureValue>
"""
TEST_SIGNATURE = """<?xml version="1.0" encoding="utf-8"?>
<Signature xmlns="http://www.w3.org/2000/09/xmldsig#" Id="id">
<SignedInfo Id="id">
<CanonicalizationMethod
Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments">
</CanonicalizationMethod>
<SignatureMethod
Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1">
<HMACOutputLength>8</HMACOutputLength>
</SignatureMethod>
<Reference Id="id" URI="http://www.sios.com/URI"
Type="http://www.sios.com/Type">
<Transforms>
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature">
<XPath>xpath</XPath>
</Transform>
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature">
<XPath>xpath</XPath>
</Transform>
</Transforms>
<DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
<DigestValue>digest value</DigestValue>
</Reference>
</SignedInfo>
<SignatureValue Id="id">
signature value
</SignatureValue>
<KeyInfo Id="id">
<KeyName>
key name
</KeyName>
<KeyValue>
<DSAKeyValue>
<P>p</P>
<Q>q</Q>
<G>g</G>
<Y>y</Y>
<J>j</J>
<Seed>seed</Seed>
<PgenCounter>pgen counter</PgenCounter>
</DSAKeyValue>
</KeyValue>
<RetrievalMethod URI="http://www.sios.com/URI"
Type="http://www.sios.com/Type">
<Transforms>
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature">
<XPath>xpath</XPath>
</Transform>
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature">
<XPath>xpath</XPath>
</Transform>
</Transforms>
</RetrievalMethod>
<X509Data>
<X509IssuerSerial>
<X509IssuerName>issuer name</X509IssuerName>
<X509IssuerNumber>1</X509IssuerNumber>
</X509IssuerSerial>
<X509SKI>x509 ski</X509SKI>
<X509SubjectName>x509 subject name</X509SubjectName>
<X509Certificate>x509 certificate</X509Certificate>
<X509CRL>x509 crl</X509CRL>
</X509Data>
<PGPData>
<PGPKeyID>pgp key id</PGPKeyID>
<PGPKeyPacket>pgp key packet</PGPKeyPacket>
</PGPData>
<MgmtData>
mgmt data
</MgmtData>
<SPKIData>
<SPKISexp>spki sexp</SPKISexp>
<SPKISexp>spki sexp2</SPKISexp>
</SPKIData>
</KeyInfo>
<Object Id="object_id" Encoding="http://www.w3.org/2000/09/xmldsig#base64">
V2VkIEp1biAgNCAxMjoxMTowMyBFRFQgMjAwMwo
</Object>
</Signature>
"""
| Rondineli/django-sso | django_sso/python-saml2-read-only/src/saml2/ds_test_data.py | Python | gpl-2.0 | 11,058 |
#! /usr/bin/env python
import json
import sys
sys.path.append('..')
from CodewebsIndexClient import CodewebsIndexClient
def loadTextFile(fname):
with open(fname) as fid:
return fid.read()
def wrap(ast,code,map,codeblockid):
astjson = json.loads(ast)
wrappedJSON = {'ast': astjson,
'code': code,
'map': map,
'codeblockid': codeblockid,
'startline': 4,
'startlineindex': 20,
'endline': 4,
'endlineindex': 28,
'querytype': 3}
#return json.dumps(wrappedJSON,sort_keys = True,indent=4,separators=(',',': '))
return json.dumps(wrappedJSON)
def run():
codeblockid = 30
asttext = loadTextFile('ast.json')
codetext = loadTextFile('code')
maptext = loadTextFile('map')
inputJSON = wrap(asttext,codetext,maptext,codeblockid)
asttext_bad = loadTextFile('ast_pinv.json')
codetext_bad = loadTextFile('code_pinv')
maptext_bad = loadTextFile('map_pinv')
inputJSON_bad = wrap(asttext_bad,codetext_bad,maptext_bad,codeblockid)
cwindex = CodewebsIndexClient()
print " [x] Requesting!"
response = cwindex.call(inputJSON)
print " [.] Got %r" % (response,)
print " [x] Requesting!"
response = cwindex.call(inputJSON_bad)
print " [.] Got %r" % (response,)
if __name__ == '__main__':
run()
| tanonev/codewebs | src/analogyFinder/src/daemons/example/exampleclient.py | Python | mit | 1,439 |
#coding=UTF-8
'''
Created on 2011-7-5
@author: Administrator
'''
import cookielib
import urllib2
from pyquery.pyquery import PyQuery
import re
from config import housetype, checkPath, makePath,citynameDict_sf
import datetime
import time
import threading
from BeautifulSoup import BeautifulSoup
from jjrlog import LinkLog
from jjrlog import msglogger
from common import postHost,printRsult
import gc
gc.enable()
homepath="e:\\home\\spider\\"
getheader={
"User-Agent": "Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
}
class LinkCrawl(object):
def __init__(self,citycode="",kind="",upc="5",st="3"):
cj = cookielib.MozillaCookieJar()
self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler())
self.header={
"User-Agent":'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6.6; .NET CLR 3.5.30729)',
}
self.clinks=[]
self.pn=[]
self.citycode=citycode
self.kind=kind
self.st=st
if kind=="3":#1求购
self.urlpath="http://esf.%s.soufun.com/qiugou/i3%s/"
self.baseurl="http://esf.%s.soufun.com"%self.citycode
self.folder="buy\\"
elif kind=="4":#2求租
self.urlpath="http://rent.%s.soufun.com/qiuzu/i3%s/"
self.baseurl="http://rent.%s.soufun.com"%self.citycode
self.folder="req\\"
elif kind=="2":#出租
self.urlpath="http://rent.%s.soufun.com/house/a21-i%s/"
self.baseurl="http://rent.%s.soufun.com"%self.citycode
self.folder="rent\\"
elif kind=="1":#出售
self.urlpath="http://esf.%s.soufun.com/house/a21-i%s/"
self.baseurl="http://esf.%s.soufun.com"%self.citycode
self.folder="sell\\"
def __getPageAllLink(self,p):
# if self.kind=="1":
# lis=PyQuery(p)("div.qiuzu li")
# elif self.kind=="2":
# lis=PyQuery(p)("div.qiuzu li")
if self.kind=="1" or self.kind=="2":
lis=PyQuery(p)("div.house")
else:
lis=PyQuery(p)("div.qiuzu li")
links=[]
for li in lis:
# if self.kind=="3":
# tm=PyQuery(li)("p.time span").eq(1).text()
# link=self.baseurl+PyQuery(li)("p.housetitle a").attr("href")
if self.kind=="2" or self.kind=="1":
tm=PyQuery(li)("p.time").text()
tm=tm and tm.replace("个人","") or ""
link=self.baseurl+PyQuery(li)("p.housetitle a").attr("href")
else:
tm=PyQuery(li)("span.li5").text()
link=self.baseurl+PyQuery(li)("span.li2 a").attr("href")
if self.kind=="4":
if PyQuery(li)("span.li1").text()=="合租 ":
continue
# tm=PyQuery(li)("span.li5").text()
# link=self.baseurl+PyQuery(li)("span.li2 a").attr("href")
#link=self.baseurl+PyQuery(li)("span.li2 a").attr("href")
# print link
if u"天" in tm:
s=tm.find(u"天")
tm=tm[:s]
if int(tm)<8:
links.append(link)
else:
break
elif u"小时" in tm:
links.append(link)
elif u"分钟" in tm:
links.append(link)
else:
continue
if 1:#not checkPath(homepath,self.folder,link):
LinkLog.info("%s|%s"%(self.kind,link))
try:
getContent(link,self.citycode,self.kind)
except Exception,e:print "ganji getContent Exception %s"%e
time.sleep(int(self.st))
# fetch_quere.put({"mod":"soufang","link":link,"citycode":self.citycode,"kind":self.kind})
# self.clinks.extend(links)
if self.kind=="1" or self.kind=="2":
if len(links)!=30:
return False
else:
return True
else:
if len(links)!=35:
return False
else:
return True
def __initPageNum(self):
initurl=(self.urlpath%(self.citycode,"1"))[:-4]
req=urllib2.Request(initurl, None, self.header)
try:
p=self.br.open(req).read()
except:
raise
p=unicode(p.decode("GB18030").encode("UTF-8"))
if self.kind=="1":
pg=PyQuery(p)("li#list_98").text()
else:
pg=PyQuery(p)("li#rentid_67").text()
# pg=PyQuery(p)("li#rentid_67").text()
if re.search('''1/(\d+) ''', pg):
pn=int(re.search('''1/(\d+) ''', pg).group(1))
# print pn
r=self.__getPageAllLink(p)
if not r:
return
self.pn=range(2,int(pn)+1)
def __getAllNeedLinks(self):
for i in self.pn:
if self.kind=="2" or self.kind=="1":
i="3%s"%i
url=self.urlpath%(self.citycode,i)
# print url
req=urllib2.Request(url, None, self.header)
p=self.br.open(req).read()
p=unicode(p.decode("GB18030").encode("UTF-8"))
r=self.__getPageAllLink(p)
if not r:
break
def runme(self):
self.__initPageNum()
self.__getAllNeedLinks()
# print len(self.clinks)
# return self.clinks
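# Illustrative usage sketch (added comment, not part of the original source);
# the city code and throttle value are invented for the example:
#     crawler = LinkCrawl(citycode="sh", kind="1", st="3")  # kind "1": for-sale listings
#     crawler.runme()  # walks recent listing pages and hands each fresh link to getContent()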
class ContentCrawl(object):
def __init__(self,links,citycode,kind):
cj = cookielib.MozillaCookieJar()
self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler())
self.pdb={}
self.header={
"User-Agent":'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6.6; .NET CLR 3.5.30729)',
}
self.urls=links
self.kind=kind
self.fd={}
self.citycode=citycode
if kind=="1":
self.folder="sell\\"
elif kind=="2":
self.folder="rent\\"
elif kind=="3":
self.folder="buy\\"
else:
self.folder="req\\"
self.posttime_regx = '''\((.*?)前更新'''
self.owner_name_regx = '''<dd>联 系 人:(.*?)</dd>'''
self.owner_phone_regx = '''<span class="telno0">(\d+)</span>'''
self.owner_phone1_regx = '''<span class="telno">(.*) <span class="telzhuan">转</span>(.*)</span><br>'''
self.cityarea_regx = '''\( <a .*>(.*)</a> <a .*>.*</a> \)'''
self.borough_section_regx = '''\( <a .*>.*</a> <a .*>(.*)</a> \)'''
self.house_addr_regx = '''地 址:</span><a .*">(.*)</a>'''
self.house_addr1_regx = '''地 址:</span>[\s]+<a .*>(.*) <span'''
self.house_price_regx_rent = '''租 金:<span class="red20b">(\d+)</span>'''
self.house_price_regx_sell = '''总 价:<span .*>(.*)</span>'''
self.house_price_regx_buy = '''<li class="right">不超过<span class="red20b">(\d+)<'''
self.house_price_regx_req = '''<li class="right">不超过<span class="red20b">(\d+)</span>元/月'''
self.house_totalarea_regx_req = '''不小于(\d+)平米'''
self.house_totalarea_regx_rent = '''<dd>出租面积:(.*)平方米</dd>'''
self.house_totalarea_regx_sell = '''建筑面积:<span .*>(.*)</span>'''
self.house_totalarea_regx_buy = '''期望面积:</span>不小于(\d+)平米'''
self.house_room_regx = '''(\d)室'''
self.house_room1_regx = '''(.{3})居'''
self.house_hall_regx = '''(\d)厅'''
self.house_toilet_regx = '''(\d)卫'''
self.house_room_regx_sell = '''(.{3})室'''
self.house_hall_regx_sell = '''(.{3})厅'''
self.house_toilet_regx_sell = '''(.{3})卫'''
self.house_floor_regx = '''第(\d+)层'''
self.house_topfloor_regx = '''共(\d+)层'''
self.house_age_regx = '''建筑年代:</span>(.*)年</dd>'''
self.belong_regx = '''产权性质:</span>(.*)</dd>'''
self.paytype_regx = '''支付方式:(.*)</dd>'''
self.house_toward_regx = '''朝 向:</span>(.*)</dd>'''
self.house_type_regx = '''物业类型:</span>(.*)</dd>'''
self.house_fitment_regx = '''装 修:</span>(.*)</dd>'''
self.house_support_regx = '''房屋配套:</span><span .*>(.*)</span>'''
self.house_support1_regx = '''房屋配套:</span>(.*)</dt>'''
self.house_desc_regx = '''<div class="beizhu" .*><p>(.*?)</p>'''
self.house_desc_regx_req = '''<div class="qzbeizhu">(.*?)</p>'''
def require(self,url):
cookiestore=cookielib.MozillaCookieJar()
request = urllib2.Request(url,None , getheader)
br = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiestore),urllib2.HTTPRedirectHandler())
response=br.open(request).read()
response=response.decode("GB18030").encode("UTF-8")
soup = BeautifulSoup(response)
if self.parseHtml(self.posttime_regx, response):
posttime = self.parseHtml(self.posttime_regx, response)
posttime = re.sub('<.*?>','',posttime)
self.fd['posttime1'] =posttime
if posttime.find('天') != -1:
self.fd['posttime'] =int(posttime.replace('天',''))*3600*24
elif posttime.find('小时') != -1:
self.fd['posttime'] =int(posttime.replace('小时',''))*3600
elif posttime.find('分钟') != -1:
self.fd['posttime'] =int(posttime.replace('分钟',''))*60
elif posttime.find('秒') != -1:
self.fd['posttime'] =int(posttime.replace('秒',''))
else:
self.fd['posttime'] = 0
else:
self.fd["is_ok"] = False
return
info_class = soup.find('div',{'class':'info'})
info_class_str = str(info_class)
self.fd["house_title"] = info_class.h1.span.contents[0].strip() if info_class.h1.span else ""
tel = info_class.find('span',{'class':'tel'})
if tel and tel.font:
self.fd["owner_phone"] = tel.font.contents[0]
if tel.font.nextSibling:
self.fd["owner_name"] = tel.font.nextSibling.strip()
else:
self.fd["owner_name"] = ''
else:
self.fd['is_ok'] = False
return
if info_class.ul and info_class.ul.table and info_class.ul.table.td:
house_area = info_class.ul.table.td.contents[0].replace('[','')
self.fd["house_addr"]=house_area
if house_area.find(' ') != -1:
house_area = house_area.split(' ')
self.fd["borough_name"] =house_area[1]
self.fd["cityarea"]=house_area[0]
self.fd["borough_section"]=house_area[1]
else:
self.fd["borough_name"] =house_area
self.fd["cityarea"]= ''
self.fd["borough_section"]=''
else:
self.fd["borough_name"] =''
self.fd["cityarea"]= ''
self.fd["borough_section"]=''
self.fd["house_addr"]=''
house_class = soup.find('dl',{'class':'house'})
house_class_str = str(house_class)
self.fd["house_price"] = self.parseHtml(self.house_price_regx_req, response) if self.parseHtml(self.house_price_regx_req, response) else "0"
self.fd["house_price_min"] ="0"
self.fd["house_price_max"] =self.fd["house_price"]
self.fd["house_totalarea"] = self.parseHtml(self.house_totalarea_regx_req, response) if self.parseHtml(self.house_totalarea_regx_req, response) else "0"
self.fd["house_totalarea_max"] = self.fd["house_totalarea"]
self.fd["house_totalarea_min"] = "0"
self.fd["house_room1"] = self.houserhtnum(self.parseHtml(self.house_room1_regx, response)) if self.parseHtml(self.house_room1_regx, response) else "1"
self.fd["house_hall"] = "0"
self.fd["house_toilet"] = "0"
self.fd["house_floor"] = "0"
self.fd["house_topfloor"]= "0"
self.fd["house_age"] = "0"
house_support = self.parseHtml(self.house_support1_regx, response) if self.parseHtml(self.house_support1_regx, response) else "0"
house_support = house_support.replace('、',',')
self.fd["house_deposit"] = "0"
self.fd["paytype"] = "0"
self.fd["house_toward"] = "0"
self.fd["house_type"] = "1"
self.fd["house_fitment"] = "0"
self.fd["house_support"] = '0'
self.fd["house_support1"]= '0'#self.getsupport(house_support)
qzbeizhu_class = soup.find('div',{'class':'qzbeizhu'})
if qzbeizhu_class and qzbeizhu_class.p:
self.fd["house_desc"] = soup.find('div',{'class':'qzbeizhu'}).p.contents[0].strip()
self.fd["house_desc"] = re.sub("<.*?>|\n|\t|\r","",self.fd["house_desc"])
else:
self.fd["house_desc"] = ""
def buy(self,url):
cookiestore=cookielib.MozillaCookieJar()
request = urllib2.Request(url,None , getheader)
br = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiestore),urllib2.HTTPRedirectHandler())
response=br.open(request).read()
response=response.decode("GB18030").encode("UTF-8")
soup = BeautifulSoup(response)
if self.parseHtml(self.posttime_regx, response):
posttime = self.parseHtml(self.posttime_regx, response)
self.fd['posttime1'] =posttime
if posttime.find('天') != -1:
self.fd['posttime'] =int(posttime.replace('天',''))*24*3600
elif posttime.find('小时') != -1:
self.fd['posttime'] =int(posttime.replace('小时',''))*3600
elif posttime.find('分钟') != -1:
self.fd['posttime'] =int(posttime.replace('分钟',''))*60
elif posttime.find('秒') != -1:
self.fd['posttime'] =int(posttime.replace('秒',''))
else:
self.fd['posttime'] = int(time.time())
else:
self.fd["is_ok"] = False
return
info_class = soup.find('div',{'class':'info'})
info_class_str = str(info_class)
self.fd["house_title"] = info_class.h1.span.contents[0].strip() if info_class.h1.span else ""
tel = info_class.find('span',{'class':'tel'})
if tel and tel.font:
self.fd["owner_phone"] = tel.font.contents[0]
if tel.font.nextSibling:
self.fd["owner_name"] = tel.font.nextSibling.strip()
else:
self.fd["owner_name"] = ''
else:
self.fd['is_ok'] = False
return
if info_class.ul and info_class.ul.table and info_class.ul.table.td:
house_area = info_class.ul.table.td.contents[0].replace('[','')
self.fd["house_addr"]=house_area
if house_area.find(' ') != -1:
house_area = house_area.split(' ')
self.fd["borough_name"] =house_area[1]
self.fd["cityarea"]=house_area[0]
self.fd["borough_section"]=house_area[1]
else:
self.fd["borough_name"] =house_area
self.fd["cityarea"]= ''
self.fd["borough_section"]=''
else:
self.fd["borough_name"] =''
self.fd["cityarea"]= ''
self.fd["borough_section"]=''
self.fd["house_addr"]=''
house_class = soup.find('dl',{'class':'house'})
house_class_str = str(house_class)
self.fd["house_price"] = self.parseHtml(self.house_price_regx_buy, response) if self.parseHtml(self.house_price_regx_buy, response) else "0"
self.fd["house_price_min"] ="0"
self.fd["house_price_max"] =self.fd["house_price"]
self.fd["house_totalarea"] = self.parseHtml(self.house_totalarea_regx_buy, response) if self.parseHtml(self.house_totalarea_regx_buy, response) else "0"
self.fd["house_totalarea_max"] = "0"
self.fd["house_totalarea_min"] = self.fd["house_totalarea"]
self.fd["house_room"] = self.houserhtnum(self.parseHtml(self.house_room_regx_sell, info_class_str)) if self.parseHtml(self.house_room_regx_sell, info_class_str) else "0"
self.fd["house_hall"] = self.houserhtnum(self.parseHtml(self.house_hall_regx_sell, info_class_str)) if self.parseHtml(self.house_hall_regx_sell, info_class_str) else "0"
self.fd["house_toilet"] = self.houserhtnum(self.parseHtml(self.house_toilet_regx_sell, info_class_str)) if self.parseHtml(self.house_toilet_regx_sell, info_class_str) else "0"
self.fd["house_floor"] = "0"
self.fd["house_topfloor"] = "0"
self.fd["house_age"] = "0"
self.fd["belong"] = "0"
if re.search(ur'''期望楼层:</span>([\u4e00-\u9fa5]+)[\s]*</dd>''', unicode(response,"UTF-8")):
house_floor=re.search(ur'''期望楼层:</span>([\u4e00-\u9fa5]+)[\s]*</dd>''', unicode(response,"UTF-8"))
self.fd["house_floor"]=self.qiugouhousefloor(house_floor.group(1))
else:
self.fd["house_floor"]=""
if re.search('''配套设施:</span>(.*)</dt>''', response):
house_support=re.search('''配套设施:</span>(.*)</dt>''', response)
self.fd["house_support"]=self.getsupport(house_support.group(1))
else:
self.fd["house_support"]=""
if re.search(ur'''期望朝向:</span>([\u4e00-\u9fa5]+)[\s]*</dd>''', unicode(response,"UTF-8")):
house_toward=re.search(ur'''期望朝向:</span>([\u4e00-\u9fa5]+)[\s]*</dd>''', unicode(response,"UTF-8"))
self.fd["house_toward"]=self.qiugouhousetoward(house_toward.group(1))
else:
self.fd["house_toward"]=""
if re.search('''期望房龄:</span>(.*)年.*</dd>''', response):
house_age=re.search('''期望房龄:</span>(.*)年.*</dd>''', response)
self.fd["house_age"]=self.gethouseage(house_age.group(1))
else:
self.fd["house_age"]=""
self.fd["house_deposit"] = "0"
self.fd["house_toward"] = "0"
self.fd["house_type"] = "0"
self.fd["house_fitment"] = "0"
self.fd["house_support"] = "0"
beizhu_class = soup.find('p',{'class':'beizhu mt10'})
if beizhu_class:
self.fd["house_desc"] = str(beizhu_class).strip()
self.fd["house_desc"] = re.sub("<.*?>|\n|\t|\r","",self.fd["house_desc"])
self.fd["house_desc"] = self.fd["house_desc"].replace(' ','')
else:
self.fd["house_desc"] = ""
def sell(self,url):
cookiestore=cookielib.MozillaCookieJar()
request = urllib2.Request(url,None , getheader)
br = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiestore),urllib2.HTTPRedirectHandler())
page=br.open(request).read()
response=page and page.decode("GB18030").encode("UTF-8").replace('''<meta http-equiv="Content-Type" content="text/html; charset=gb2312" />''',"") or ""
soup = BeautifulSoup(response)
if self.parseHtml(self.posttime_regx, response):
posttime = self.parseHtml(self.posttime_regx, response)
self.fd['posttime1'] =posttime
if posttime.find('天') != -1:
self.fd['posttime'] =int(posttime.replace('天',''))*24*3600
elif posttime.find('小时') != -1:
self.fd['posttime'] =int(posttime.replace('小时',''))*3600
elif posttime.find('分钟') != -1:
self.fd['posttime'] =int(posttime.replace('分钟',''))*60
elif posttime.find('秒') != -1:
self.fd['posttime'] =int(posttime.replace('秒',''))
else:
self.fd['posttime'] = 0
else:
self.fd["is_ok"] = False
return
info_class = soup.find('div',{'class':'info'})
info_class_str = str(info_class)
tel = info_class.find('span',{'class':'tel'})
if tel and tel.span:
self.fd["owner_phone"] = tel.span.contents[0]
else:
self.fd["owner_phone"]=""
self.fd["is_ok"] = False
return
self.fd["house_title"] = info_class.h1.span.contents[0].strip() if info_class.h1.span else ""
self.fd["owner_name"] = self.parseHtml(self.owner_name_regx, response) if self.parseHtml(self.owner_name_regx, response) else ""
#if self.parseHtml(self.owner_phone_regx, response):
#self.fd["owner_phone"] = self.parseHtml(self.owner_phone_regx, response)
#else:
#if re.search(self.owner_phone1_regx, response):
#owner_phone=re.search(self.owner_phone1_regx, response)
#self.fd["owner_phone"]=owner_phone.group(1)+"-"+owner_phone.group(2)
#else:
#self.fd["owner_phone"]=""
#self.fd["is_ok"] = False
#return
house_class = soup.find('dl',{'class':'house'})
house_class_str = str(house_class)
if house_class:
house_class_str = str(house_class)
self.fd["borough_name"] = house_class.strong.contents[0] if house_class.strong else ''
self.fd["cityarea"]= self.parseHtml(self.cityarea_regx, response) if self.parseHtml(self.cityarea_regx, response) else ""
self.fd["borough_section"]= self.parseHtml(self.borough_section_regx, response) if self.parseHtml(self.borough_section_regx, response) else ""
else:
self.fd["borough_name"] =''
self.fd["cityarea"]=""
self.fd["borough_section"]=""
if self.parseHtml(self.house_addr_regx, response):
self.fd["house_addr"]= self.parseHtml(self.house_addr_regx, response)
else:
self.fd["house_addr"]= self.parseHtml(self.house_addr1_regx, response) if self.parseHtml(self.house_addr1_regx, response) else ""
self.fd["house_price"] = self.parseHtml(self.house_price_regx_sell, response) if self.parseHtml(self.house_price_regx_sell, response) else "0"
self.fd["house_totalarea"] = self.parseHtml(self.house_totalarea_regx_sell, response) if self.parseHtml(self.house_totalarea_regx_sell, response) else "0"
self.fd["house_room"] = self.houserhtnum(self.parseHtml(self.house_room_regx_sell, info_class_str)) if self.parseHtml(self.house_room_regx_sell, info_class_str) else "0"
self.fd["house_hall"] = self.houserhtnum(self.parseHtml(self.house_hall_regx_sell, info_class_str)) if self.parseHtml(self.house_hall_regx_sell, info_class_str) else "0"
self.fd["house_toilet"] = self.houserhtnum(self.parseHtml(self.house_toilet_regx_sell, info_class_str)) if self.parseHtml(self.house_toilet_regx_sell, info_class_str) else "0"
self.fd["house_floor"] = self.parseHtml(self.house_floor_regx, response) if self.parseHtml(self.house_floor_regx, response) else "0"
self.fd["house_topfloor"] = self.parseHtml(self.house_topfloor_regx, response) if self.parseHtml(self.house_topfloor_regx, response) else "0"
self.fd["house_age"] = self.parseHtml(self.house_age_regx, response) if self.parseHtml(self.house_age_regx, response) else "0"
self.fd["belong"] = self.getbelong(self.parseHtml(self.belong_regx, response)) if self.parseHtml(self.belong_regx, response) else "0"
house_toward = self.parseHtml(self.house_toward_regx, response) if self.parseHtml(self.house_toward_regx, response) else "0"
house_type = self.parseHtml(self.house_type_regx, response) if self.parseHtml(self.house_type_regx, response) else "0"
house_fitment = self.parseHtml(self.house_fitment_regx, response) if self.parseHtml(self.house_fitment_regx, response) else "0"
house_support = self.parseHtml(self.house_support_regx, response) if self.parseHtml(self.house_support_regx, response) else "0"
self.fd["house_deposit"] = "0"
self.fd["house_toward"] = self.getforward(house_toward)
self.fd["house_type"] = self.gethousetype(house_type)
self.fd["house_fitment"] = self.getfitment(house_fitment)
self.fd["house_support"] = self.getsupport(house_support)
beizhu_class = soup.find('div',{'class':'beizhu'})
if beizhu_class and beizhu_class.div:
self.fd["house_desc"] = str(beizhu_class.div).strip()
self.fd["house_desc"] = re.sub("<.*?>|\n|\t|\r","",self.fd["house_desc"])
self.fd["house_desc"] = self.fd["house_desc"].replace(' ','')
else:
self.fd["house_desc"] = ""
def parseHtml(self,regx,response):
if re.search(regx, response):
return re.search(regx, response).group(1)
else:
return None
def rent(self,url):
cookiestore=cookielib.MozillaCookieJar()
request = urllib2.Request(url,None , getheader)
br = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiestore),urllib2.HTTPRedirectHandler())
response=br.open(request).read()
response=response.decode("GB18030").encode("UTF-8")
soup = BeautifulSoup(response)
if self.parseHtml(self.posttime_regx, response):
posttime = self.parseHtml(self.posttime_regx, response)
self.fd['posttime1'] =posttime
if posttime.find('天') != -1:
self.fd['posttime'] =int(posttime.replace('天',''))*24*3600
elif posttime.find('小时') != -1:
self.fd['posttime'] =int(posttime.replace('小时',''))*3600
elif posttime.find('分钟') != -1:
self.fd['posttime'] =int(posttime.replace('分钟',''))*60
elif posttime.find('秒') != -1:
self.fd['posttime'] =int(posttime.replace('秒',''))
else:
self.fd['posttime'] = 0
else:
self.fd["is_ok"] = False
return
info_class = soup.find('div',{'class':'info'})
info_class_str = str(info_class)
self.fd["house_title"] = info_class.h1.span.contents[0].strip() if info_class.h1.span else ""
self.fd["owner_name"] = self.parseHtml(self.owner_name_regx, response) if self.parseHtml(self.owner_name_regx, response) else ""
if self.parseHtml(self.owner_phone_regx, response):
self.fd["owner_phone"] = self.parseHtml(self.owner_phone_regx, response)
else:
if re.search(self.owner_phone1_regx, response):
owner_phone=re.search(self.owner_phone1_regx, response)
self.fd["owner_phone"]=owner_phone.group(1)+"-"+owner_phone.group(2)
else:
self.fd["owner_phone"]=""
self.fd["is_ok"] = False
return
house_class = soup.find('dl',{'class':'house'})
house_class_str = str(house_class)
if house_class:
house_class_str = str(house_class)
self.fd["borough_name"] = house_class.strong.contents[0] if house_class.strong else ''
self.fd["cityarea"]= self.parseHtml(self.cityarea_regx, response) if self.parseHtml(self.cityarea_regx, response) else ""
self.fd["borough_section"]= self.parseHtml(self.borough_section_regx, response) if self.parseHtml(self.borough_section_regx, response) else ""
else:
self.fd["borough_name"] =''
self.fd["cityarea"]=""
self.fd["borough_section"]=""
if self.parseHtml(self.house_addr_regx, response):
self.fd["house_addr"]= self.parseHtml(self.house_addr_regx, response)
else:
self.fd["house_addr"]= self.parseHtml(self.house_addr1_regx, response) if self.parseHtml(self.house_addr1_regx, response) else ""
self.fd["house_price"] = self.parseHtml(self.house_price_regx_rent, info_class_str) if self.parseHtml(self.house_price_regx_rent, info_class_str) else "0"
self.fd["house_totalarea"] = self.parseHtml(self.house_totalarea_regx_rent, info_class_str) if self.parseHtml(self.house_totalarea_regx_rent, info_class_str) else "0"
self.fd["house_room"] = self.parseHtml(self.house_room_regx, info_class_str) if self.parseHtml(self.house_room_regx, info_class_str) else "0"
self.fd["house_hall"] = self.parseHtml(self.house_hall_regx, info_class_str) if self.parseHtml(self.house_hall_regx, info_class_str) else "0"
self.fd["house_toilet"] = self.parseHtml(self.house_toilet_regx, info_class_str) if self.parseHtml(self.house_toilet_regx, info_class_str) else "0"
self.fd["house_floor"] = self.parseHtml(self.house_floor_regx, response) if self.parseHtml(self.house_floor_regx, response) else "0"
self.fd["house_topfloor"] = self.parseHtml(self.house_topfloor_regx, response) if self.parseHtml(self.house_topfloor_regx, response) else "0"
self.fd["house_age"] = self.parseHtml(self.house_age_regx, response) if self.parseHtml(self.house_age_regx, response) else "0"
paytype = self.parseHtml(self.paytype_regx, info_class_str) if self.parseHtml(self.paytype_regx, info_class_str) else "5"
house_toward = self.parseHtml(self.house_toward_regx, response) if self.parseHtml(self.house_toward_regx, response) else "0"
house_type = self.parseHtml(self.house_type_regx, response) if self.parseHtml(self.house_type_regx, response) else "0"
house_fitment = self.parseHtml(self.house_fitment_regx, response) if self.parseHtml(self.house_fitment_regx, response) else "0"
house_support = self.parseHtml(self.house_support_regx, response) if self.parseHtml(self.house_support_regx, response) else "0"
self.fd["house_deposit"]=self.getpaytype(paytype)
self.fd["house_toward"] = self.getforward(house_toward)
self.fd["house_type"] = self.gethousetype(house_type)
self.fd["house_fitment"] = self.getfitment(house_fitment)
self.fd["house_support"] = self.getsupport(house_support)
self.fd["house_desc"] = self.parseHtml(self.house_desc_regx, response) if self.parseHtml(self.house_desc_regx, response) else ""
self.fd["house_desc"] = re.sub("<.*?>|\n|\t|\r","",self.fd["house_desc"])
def getfitment(self,str):
fitment={
"毛坯":"1",
"简单装修":"2",
"中等装修":"3",
"精装修":"4",
"豪华装修":"5",
}
return fitment.get(str) and fitment.get(str) or "3"
def getforward(self,str):
forward={
"南北通透":"1",
"东西向":"2",
"朝南":"3",
"朝比":"4",
"朝东":"5",
"朝西":"6",
"不限":"0",
}
return forward.get(str) and forward.get(str) or "0"
def getsupport(self,str):
# NOTE: this mapping mixes sale-listing facilities with rental facilities;
# duplicate keys (e.g. "煤气/天然气", "暖气") keep only the later (rental) value.
support={
"煤气/天然气":"1",
"暖气":"2",
"车位/车库":"3",
"电梯":"4",
"储藏室/地下室":"5",
"花园/小院":"6",
"露台":"7",
"阁楼":"8",
"宽带":"1",
"空调":"2",
"冰箱":"3",
"洗衣机":"4",
"热水器":"5",
"厨具":"6",
"床":"7",
"家具":"8",
"有线电视":"9",
"微波炉":"10",
"煤气/天然气":"11",
"暖气":"12",
"车位":"13",
"电梯":"14",
"露台/花园":"15",
"储藏室/地下室":"16",
"电话":"17",
}
ss=str.split(",")
return ",".join([ support.get(s) and support.get(s) or "" for s in ss])
def gethousetype(self ,str):
housetype={
"普通住宅":"1",
"住宅":"1",
"酒店式公寓":"2",
"商住楼":"3",
"拆迁安置房":"4",
"老新村":"5",
}
return housetype.get(str) and housetype.get(str) or ""
def gethousestruct(self,str):
housestruct={
"平层":"1",
"复式":"2",
"跃层":"3",
"错层":"4",
"开间":"5",
}
return housestruct.get(str) and housestruct.get(str) or "1"
def gethouseother(self,str):
houseother={
"拎包入住":"1",
"家电齐全":"2",
"可上网":"3",
"可做饭":"4",
"可洗澡":"5",
"空调房":"6",
"可看电视":"7",
"有暖气":"8",
"有车位":"9",
}
return houseother.get(str) and houseother.get(str) or ""
def getrent_type(self,str):
rent_type={
"整租":"1",
"合租":"2",
"合租单间":"3",
"合租床位":"4",
}
return rent_type.get(str) and rent_type.get(str) or "1"
def getpaytype(self,str):
paytype={
"月付":"1",
"季付":"2",
"半年付":"3",
"年付":"4",
"面议":"5",
}
return paytype.get(str) and paytype.get(str) or "5"
def getbelong(self,str):
belong={
"个人产权":"1",
"使用权":"2",
}
return belong.get(str) and belong.get(str) or "1"
def gethouseage(self,str):
age={
"2":"1",
"2-5":"2",
"5-10":"3",
"10":"4",
}
return age.get(str) and age.get(str) or "2"
def houserhtnum(self,str):
num={
"一":"1",
"二":"2",
"两":"2",
"三":"3",
"四":"4",
"五":"5",
"六":"6",
"七":"7",
"八":"8",
"九":"9",
}
return num.get(str) and num.get(str) or "0"
def qiugouhousefloor(self,str):
floor={
"不限":"0",
"底层":"1",
"中低层":"2",
"高层":"3",
"顶层":"4",
"地下室":"5",
}
return floor.get(str) and floor.get(str) or "0"
def qiugouhousetoward(self,str):
twd={
"不限":"0",
"南北":"1",
"东西":"2",
"东南":"3",
"东北":"4",
"西南":"5",
"西北":"6",
"东":"7",
"西":"8",
"南":"9",
"北":"10",
}
return twd.get(str) and twd.get(str) or "0"
def extractDict(self):
if 0:#checkPath(homepath,self.folder,self.urls):
return None
else:
self.fd["citycode"] = self.citycode
self.fd["cityname"] = citynameDict_sf[self.citycode]
self.fd["c"]="houseapi"
self.fd["a"]="savehouse"
self.fd["is_checked"] = 1
self.fd["web_flag"] = "sf"
self.fd["is_ok"] = True
print self.urls
try:
if self.kind=="3":
self.buy(self.urls)
self.fd["house_flag"] = 3
elif self.kind=="4":
self.require(self.urls)
self.fd["house_flag"] = 4
elif self.kind=="2":
self.rent(self.urls)
self.fd["house_flag"] = 2
elif self.kind=="1":
self.sell(self.urls)
self.fd["house_flag"] = 1
makePath(homepath,self.folder,self.urls)
except Exception,e:
print e
pass
else:
if not self.fd["is_ok"]:
return
if not self.fd["is_checked"]:
printRsult(self.fd,self.kind)
self.fd['posttime'] = int(time.time()) - self.fd['posttime']
#print "%s %s %s %s"%(self.citycode, self.kind ,time.strftime("%Y-%m-%d %H:%M:%S",self.fd['posttime']), self.urls)
return self.fd
def getDict(d):
lc=LinkCrawl(d["citycode"],d["kind"])
clinks=lc.runme()
cc=ContentCrawl(clinks,d["citycode"],d["kind"])
cc.extractDict()
class fetchData(threading.Thread):
def __init__(self,d):
threading.Thread.__init__(self)
self.d=d
def run(self):
lc=LinkCrawl(self.d["citycode"],self.d["kind"])
clinks=lc.runme()
cc=ContentCrawl(clinks,self.d["citycode"],self.d["kind"])
cc.extractDict()
def getLinks(d):
lc=LinkCrawl(d["citycode"],d["kind"],d["st1"])
while True:
lc.runme()
del gc.garbage[:]
time.sleep(int(d["st2"]))
def getContent(clinks,citycode,kind):
cc=ContentCrawl(clinks,citycode,kind)
fd=cc.extractDict()
res=""
try:
res=postHost(fd)
except Exception,e:
res=e
print res
msglogger.info("%s|%s|%s"%(clinks,res,""))
return fd
if __name__=="__main__":
lc=LinkCrawl(citycode="wuxi",kind="4")
lc.runme()
#cc=ContentCrawl("http://esf.wuxi.soufun.com/chushou/1_119888237_-1.htm#p=1",citycode="wuxi",kind="1")
#cc=ContentCrawl("http://rent.wuxi.soufun.com/chuzu/1_49544277_-1.htm",citycode="wuxi",kind="2")
cc=ContentCrawl("http://esf.wuxi.soufun.com/qiugou/1_860333_-1.htm",citycode="wuxi",kind="3")
#cc=ContentCrawl("http://rent.wuxi.soufun.com/qiuzu/1_55103674_-1.htm",citycode="wuxi",kind="4")
cc.extractDict()
# lf=file("link.log")
# for link in lf.readlines():
# cc=ContentCrawl(link,citycode="suzhou",kind="3")
# cc.extractDict()
# getDict({"citycode":"suzhou","kind":"1",}) | ptphp/PyLib | src/webpy1/src/jjrspider/soufun.py | Python | apache-2.0 | 39,564 |
import glob
import logging
import os
import pandas as pd
import minst.utils as utils
logger = logging.getLogger(__name__)
NAME = 'philharmonia'
ONSET_DIR = os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir,
"data", "onsets", NAME)
def parse(filename):
"""Convert phil path to codes/parameters.
Parameters
----------
filename : full path.
Returns
-------
parts : tuple, len=5
From the filename, the following parts:
(instrument, note, duration, dynamic, articulation).
"""
audio_file_name = utils.filebase(filename)
(instrument, note, duration, dynamic,
articulation) = audio_file_name.split('_')
return instrument, note, duration, dynamic, articulation
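# Illustrative example only (hypothetical path; assumes utils.filebase strips
# the directory and the extension):
#
#   parse('/data/cello_A3_1_forte_arco-normal.mp3')
#   # -> ('cello', 'A3', '1', 'forte', 'arco-normal')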
def collect(base_dir, articulations=["normal", "vibrato"],
onset_dir=ONSET_DIR):
"""Convert a base directory of Philharmonia files to a pandas dataframe.
Parameters
----------
base_dir : str
Full path to the base Philharmonia sample directory.
articulations : list of str
Articulations over which to filter the data.
onset_dir : str
Path at which to look for onset data.
Returns
-------
pandas.DataFrame
With the following columns:
id
audio_file
dataset
instrument
note
dynamic
onsets_file
"""
logger.info("Scanning {} for files.".format(base_dir))
root_dir = os.path.join(base_dir, "www.philharmonia.co.uk",
"assets/audio/samples")
# These files download as {instrument}/{instrument}.zip.
zip_fmt = os.path.join(root_dir, "*/*.zip")
zip_files = glob.glob(zip_fmt)
logger.debug("Found {} zipfiles at {}".format(len(zip_files), zip_fmt))
utils.unzip_files(zip_files)
articulation_skipped = []
# Need this to be iterable.
articulations = articulations if articulations else []
indexes = []
records = []
# MP3s are extracted automatically as {instrument}/{instrument}/{fbase}.mp3
audio_path_fmt = os.path.join(root_dir, "*/*/*.mp3")
for audio_file_path in glob.glob(audio_path_fmt):
(instrument, note, duration, dynamic,
articulation) = parse(audio_file_path)
art_conds = [not bool(articulations),
any([x in articulation for x in articulations])]
if any(art_conds):
uid = utils.generate_id(NAME, audio_file_path.split(base_dir)[-1])
onsets = utils.find_onset_file_from_uid(uid, onset_dir)
indexes.append(uid)
records.append(
dict(audio_file=audio_file_path,
dataset=NAME,
instrument=instrument,
note=note,
dynamic=dynamic,
onsets_file=onsets))
else:
articulation_skipped += [audio_file_path]
logger.info("Using {} files from {}.".format(len(records), NAME))
logger.warning(
utils.colorize("Skipped {} file(s) with articulation not in {}"
.format(len(articulation_skipped), articulations),
"red"))
with open("log_philharmonia_skipped.txt", 'w') as fh:
fh.write("\n".join(articulation_skipped))
return pd.DataFrame(records, index=indexes)
| ejhumphrey/minst-dataset | minst/sources/philharmonia.py | Python | isc | 3,404 |
# https://pythonhosted.org/setuptools/setuptools.html#namespace-packages
__import__('pkg_resources').declare_namespace(__name__)
import mimetypes
# http://docs.python-requests.org/en/latest/user/advanced/#post-multiple-multipart-encoded-files
def encode_multipart_formdata(args):
data = {}
files = []
for (key, value) in args.items():
if hasattr(value, 'read'):
file_name = None
content_type = None
if hasattr(value, 'name'):
file_name = value.name
elif 'title' in args:
file_name = args['title']
else:
file_name = 'unknown'
content_type = get_content_type(file_name)
f = (key, (file_name, value, content_type))
files.append(f)
else:
data[key] = value
# This is so that we can maintain backwards compatibility with
# the pre- requests (httplib) versions of this library
# (20150306/copea)
encoded = {}
if len(files):
encoded['files'] = files
if len(data.keys()):
encoded['data'] = data
return encoded
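# Illustrative usage sketch (not part of this module; the URL and filenames are
# hypothetical, and the 'requests' library is assumed to be installed):
#
#   import requests
#   args = {'title': 'my photo', 'photo': open('photo.jpg', 'rb')}
#   encoded = encode_multipart_formdata(args)
#   requests.post('https://api.example.com/upload', **encoded)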
def encode_urlencode(data):
return {'data': data}
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
| straup/py-flamework-api | flamework/api/request/__init__.py | Python | bsd-3-clause | 1,157 |
# print 'hello world!'
print 'hello world!'
| hsadler/programming-language-examples | python/hello_world.py | Python | mit | 46 |
# -*- coding: utf-8 -*-
"""
All code for scraping images and videos from posted
links go in this file.
"""
import BeautifulSoup
import requests
from urlparse import urlparse, urlunparse, urljoin
img_extensions = ['jpg', 'jpeg', 'gif', 'png', 'bmp']
def make_abs(url, img_src):
domain = urlparse(url).netloc
scheme = urlparse(url).scheme
baseurl = scheme + '://' + domain
return urljoin(baseurl, img_src)
def clean_url(url):
frag = urlparse(url)
frag = frag._replace(query='', fragment='')
return urlunparse(frag)
def get_top_img(url, timeout=4):
"""
Nothing fancy here: we merely check whether the page author
set a designated image or whether the url itself is an image.
This method could be much better, but we are favoring ease
of installation, simplicity, and speed.
"""
if not url:
return None
url = clean_url(url)
# if the url is referencing an img itself, return it
if url.split('.')[-1].lower() in img_extensions:
return url
try:
html = requests.get(url, timeout=timeout).text
soup = BeautifulSoup.BeautifulSoup(html)
og_image = (soup.find('meta', property='og:image') or
soup.find('meta', attrs={'name': 'og:image'}))
if og_image and og_image['content']:
src_url = og_image['content']
return make_abs(url, src_url)
# <link rel="image_src" href="http://...">
thumbnail_spec = soup.find('link', rel='image_src')
if thumbnail_spec and thumbnail_spec['href']:
src_url = thumbnail_spec['href']
return make_abs(url, src_url)
except Exception, e:
print 'FAILED WHILE EXTRACTING THREAD IMG', str(e)
return None
return None
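# Illustrative usage (the URL below is hypothetical):
#
#   thumb = get_top_img('http://example.com/some-article')
#   if thumb:
#       print 'top image:', thumb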
| codelucas/flask_reddit | flask_reddit/media.py | Python | mit | 1,763 |
# Copyright (c) 2011 The Chromium OS Authors.
#
# See file CREDITS for list of people who contributed to this
# project.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
import os
import gitutil
import terminal
# Series-xxx tags that we understand
valid_series = ['to', 'cc', 'version', 'changes', 'prefix', 'notes', 'name']
class Series(dict):
"""Holds information about a patch series, including all tags.
Vars:
cc: List of aliases/emails to Cc all patches to
commits: List of Commit objects, one for each patch
cover: List of lines in the cover letter
notes: List of lines in the notes
changes: (dict) List of changes for each version, The key is
the integer version number
"""
def __init__(self):
self.cc = []
self.to = []
self.commits = []
self.cover = None
self.notes = []
self.changes = {}
# These make us more like a dictionary
def __setattr__(self, name, value):
self[name] = value
def __getattr__(self, name):
return self[name]
def AddTag(self, commit, line, name, value):
"""Add a new Series-xxx tag along with its value.
Args:
line: Source line containing tag (useful for debug/error messages)
name: Tag name (part after 'Series-')
value: Tag value (part after 'Series-xxx: ')
"""
# If we already have it, then add to our list
if name in self:
values = value.split(',')
values = [str.strip() for str in values]
if type(self[name]) != type([]):
raise ValueError("In %s: line '%s': Cannot add another value "
"'%s' to series '%s'" %
(commit.hash, line, values, self[name]))
self[name] += values
# Otherwise just set the value
elif name in valid_series:
self[name] = value
else:
raise ValueError("In %s: line '%s': Unknown 'Series-%s': valid "
"options are %s" % (commit.hash, line, name,
', '.join(valid_series)))
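# Illustrative example (the addresses are made up): a commit body line such as
#   Series-cc: [email protected], [email protected]
# reaches this method as name='cc', value='[email protected], [email protected]'
# and leaves series.cc == ['[email protected]', '[email protected]'].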
def AddCommit(self, commit):
"""Add a commit into our list of commits
We create a list of tags in the commit subject also.
Args:
commit: Commit object to add
"""
commit.CheckTags()
self.commits.append(commit)
def ShowActions(self, args, cmd, process_tags):
"""Show what actions we will/would perform
Args:
args: List of patch files we created
cmd: The git command we would have run
process_tags: Process tags as if they were aliases
"""
col = terminal.Color()
print 'Dry run, so not doing much. But I would do this:'
print
print 'Send a total of %d patch%s with %scover letter.' % (
len(args), '' if len(args) == 1 else 'es',
self.get('cover') and 'a ' or 'no ')
# TODO: Colour the patches according to whether they passed checks
for upto in range(len(args)):
commit = self.commits[upto]
print col.Color(col.GREEN, ' %s' % args[upto])
cc_list = []
if process_tags:
cc_list += gitutil.BuildEmailList(commit.tags)
cc_list += gitutil.BuildEmailList(commit.cc_list)
# Skip items in To list
if 'to' in self:
try:
map(cc_list.remove, gitutil.BuildEmailList(self.to))
except ValueError:
pass
for email in cc_list:
if email == None:
email = col.Color(col.YELLOW, "<alias not found>")
if email:
print ' Cc: ',email
print
for item in gitutil.BuildEmailList(self.get('to', '<none>')):
print 'To:\t ', item
for item in gitutil.BuildEmailList(self.cc):
print 'Cc:\t ', item
print 'Version: ', self.get('version')
print 'Prefix:\t ', self.get('prefix')
if self.cover:
print 'Cover: %d lines' % len(self.cover)
if cmd:
print 'Git command: %s' % cmd
def MakeChangeLog(self, commit):
"""Create a list of changes for each version.
Return:
The change log as a list of strings, one per line
Changes in v4:
- Jog the dial back closer to the widget
Changes in v3: None
Changes in v2:
- Fix the widget
- Jog the dial
etc.
"""
final = []
need_blank = False
for change in sorted(self.changes, reverse=True):
out = []
for this_commit, text in self.changes[change]:
if commit and this_commit != commit:
continue
out.append(text)
line = 'Changes in v%d:' % change
have_changes = len(out) > 0
if have_changes:
out.insert(0, line)
else:
out = [line + ' None']
if need_blank:
out.insert(0, '')
final += out
need_blank = have_changes
if self.changes:
final.append('')
return final
def DoChecks(self):
"""Check that each version has a change log
Print an error if something is wrong.
"""
col = terminal.Color()
if self.get('version'):
changes_copy = dict(self.changes)
for version in range(1, int(self.version) + 1):
if self.changes.get(version):
del changes_copy[version]
else:
if version > 1:
str = 'Change log missing for v%d' % version
print col.Color(col.RED, str)
for version in changes_copy:
str = 'Change log for unknown version v%d' % version
print col.Color(col.RED, str)
elif self.changes:
str = 'Change log exists, but no version is set'
print col.Color(col.RED, str)
def MakeCcFile(self, process_tags):
"""Make a cc file for us to use for per-commit Cc automation
Args:
process_tags: Process tags as if they were aliases
Return:
Filename of temp file created
"""
# Look for commit tags (of the form 'xxx:' at the start of the subject)
fname = '/tmp/patman.%d' % os.getpid()
fd = open(fname, 'w')
for commit in self.commits:
list = []
if process_tags:
list += gitutil.BuildEmailList(commit.tags)
list += gitutil.BuildEmailList(commit.cc_list)
print >>fd, commit.patch, ', '.join(list)
fd.close()
return fname
def AddChange(self, version, commit, info):
"""Add a new change line to a version.
This will later appear in the change log.
Args:
version: version number to add change list to
info: change line for this version
"""
if not self.changes.get(version):
self.changes[version] = []
self.changes[version].append([commit, info])
def GetPatchPrefix(self):
"""Get the patch version string
Return:
Patch string, like 'RFC PATCH v5' or just 'PATCH'
"""
version = ''
if self.get('version'):
version = ' v%s' % self['version']
# Get patch name prefix
prefix = ''
if self.get('prefix'):
prefix = '%s ' % self['prefix']
return '%sPATCH%s' % (prefix, version)
| MarvellEmbeddedProcessors/u-boot-armada38x | tools/patman/series.py | Python | gpl-2.0 | 8,520 |
import os
import datetime
from utils.util import run_command
__author__ = 'maa'
class MsBuilder:
def __init__(self, msbuild):
if msbuild is None:
self.msbuild = r"C:\Windows\Microsoft.NET\Framework64\v4.0.30319\MSBuild.exe"
else:
self.msbuild = msbuild
def build_with_params(self, csprojPath, targets, properties):
if not os.path.isfile(self.msbuild):
raise Exception('MsBuild.exe not found. path = ' + self.msbuild)
start = datetime.datetime.now()
print('STARTED BUILD - ' + start.strftime('%Y-%m-%d %H:%M:%S'))
params = [self.msbuild, csprojPath]
params.append('/t:' + ';'.join(targets))
params.append('/p:' + ';'.join(properties))
return run_command(params)
def build(self, csprojPath, args):
if not os.path.isfile(self.msbuild):
raise Exception('MsBuild.exe not found. path = ' + self.msbuild)
start = datetime.datetime.now()
print('STARTED BUILD - ' + start.strftime('%Y-%m-%d %H:%M:%S'))
params = [self.msbuild, csprojPath] + list(args)
return run_command(params)
def get_files_from_project_bin_folder(self, csproj, configuration, do_return_full_paths=False):
name = os.path.dirname(os.path.realpath(csproj))
bin_config_path = os.path.join(name, 'bin', configuration)
files = os.listdir(bin_config_path)
if not do_return_full_paths:
return files
files_full_path = list()
for file in files:
files_full_path.append(os.path.join(bin_config_path, file))
return files_full_path
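# Illustrative usage sketch (the paths, targets and properties below are hypothetical):
#
#   builder = MsBuilder(None)  # falls back to the default .NET 4.0 MSBuild path
#   builder.build_with_params(r'C:\src\App\App.csproj',
#                             targets=['Clean', 'Build'],
#                             properties=['Configuration=Release', 'Platform=x64'])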
| amatkivskiy/baidu | baidu/utils/msbuilder.py | Python | apache-2.0 | 1,654 |
# Copyright 2017-20 ForgeFlow S.L. (www.forgeflow.com)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests.common import tagged
from odoo.addons.test_mail.data.test_mail_data import MAIL_TEMPLATE
from odoo.addons.test_mail.tests.test_mail_gateway import TestMailgateway
@tagged("mail_gateway")
class TestFetchmailIncomingLog(TestMailgateway):
@classmethod
def setUpClass(cls):
super(TestFetchmailIncomingLog, cls).setUpClass()
cls.fetchmail_server = cls.env["fetchmail.server"].create(
{"name": "Test Fetchmail Server", "server_type": "imap"}
)
def test_message_process(self):
email_from = "[email protected]"
to_email = "[email protected]"
msg_id = "Test log message to process"
with self.assertRaises(ValueError):
mail = MAIL_TEMPLATE.format(
to=to_email,
email_from=email_from,
cc="",
subject="testing",
extra="",
msg_id=msg_id,
)
self.env["mail.thread"].with_context(
{"fetchmail_server_id": self.fetchmail_server.id}
).message_process(None, mail)
| OCA/server-tools | fetchmail_incoming_log/tests/test_fetchmail_incoming_log.py | Python | agpl-3.0 | 1,226 |
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
import json
import inflection
import re
from genericpath import exists, isfile
from os.path import join, dirname, expanduser
from os import mkdir
from mycroft.util.json_helper import load_commented_json
from mycroft.util.log import LOG
__author__ = 'seanfitz, jdorleans'
DEFAULT_CONFIG = join(dirname(__file__), 'jarbas.conf')
SYSTEM_CONFIG = '/etc/jarbas/jarbas.conf'
USER_CONFIG = join(expanduser('~'), '.jarbas/jarbas.conf')
REMOTE_CONFIG = "mycroft.ai"
RUNTIME_CONFIG = join(dirname(__file__), 'jarbas_runtime.conf')
load_order = [DEFAULT_CONFIG, REMOTE_CONFIG, SYSTEM_CONFIG, USER_CONFIG,
RUNTIME_CONFIG]
class ConfigurationLoader(object):
"""
A utility for loading Mycroft configuration files.
Mycroft configuration comes from four potential locations:
* Defaults found in 'mycroft.conf' in the code
* Remote settings (coming from home.mycroft.ai)
* System settings (typically found at /etc/mycroft/mycroft.conf
* User settings (typically found at /home/<user>/.mycroft/mycroft.conf
These get loaded in that order on top of each other. So a value specified
in the Default would be overridden by a value with the same name found
in the Remote. And a value in the Remote would be overridden by a value
set in the User settings. Not all values exist at all levels.
See comments in the 'mycroft.conf' for more information about specific
settings and where they reside.
Note:
Values are overridden by name. This includes all data under that name,
so you if a value contains a complex structure, you cannot specify
only a single component of that structure -- you have to override the
entire structure.
"""
@staticmethod
def init_config(config=None):
if not config:
return {}
return config
@staticmethod
def init_locations(locations=None, keep_user_config=True):
if not locations:
locations = [DEFAULT_CONFIG, SYSTEM_CONFIG, USER_CONFIG]
elif keep_user_config:
locations += [USER_CONFIG]
return locations
@staticmethod
def validate(config=None, locations=None):
if not (isinstance(config, dict) and isinstance(locations, list)):
LOG.error("Invalid configuration data type.")
LOG.error("Locations: %s" % locations)
LOG.error("Configuration: %s" % config)
raise TypeError
@staticmethod
def load(config=None, locations=None, keep_user_config=True):
"""
Loads default or specified configuration files
"""
config = ConfigurationLoader.init_config(config)
locations = ConfigurationLoader.init_locations(locations,
keep_user_config)
ConfigurationLoader.validate(config, locations)
for location in locations:
config = ConfigurationLoader.__load(config, location)
return config
@staticmethod
def merge_conf(base, delta):
"""
Recursively merging configuration dictionaries.
Args:
base: Target for merge
delta: Dictionary to merge into base
"""
for k, dv in delta.iteritems():
bv = base.get(k)
if isinstance(dv, dict) and isinstance(bv, dict):
ConfigurationLoader.merge_conf(bv, dv)
else:
base[k] = dv
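# Illustrative example of the recursive merge (the values are made up):
#
#   base = {"tts": {"module": "mimic", "mimic": {"voice": "ap"}}}
#   delta = {"tts": {"mimic": {"voice": "slt"}}}
#   ConfigurationLoader.merge_conf(base, delta)
#   # base -> {"tts": {"module": "mimic", "mimic": {"voice": "slt"}}}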
@staticmethod
def __load(config, location):
if exists(location) and isfile(location):
try:
ConfigurationLoader.merge_conf(
config, load_commented_json(location))
LOG.debug("Configuration '%s' loaded" % location)
except Exception, e:
LOG.error("Error loading configuration '%s'" % location)
LOG.error(repr(e))
else:
LOG.debug("Configuration '%s' not found" % location)
return config
class RemoteConfiguration(object):
"""
map remote configuration properties to
config in the [core] config section
"""
IGNORED_SETTINGS = ["uuid", "@type", "active", "user", "device"]
WEB_CONFIG_CACHE = '/opt/jarbas/web_config_cache.json'
@staticmethod
def validate(config):
if not (config and isinstance(config, dict)):
LOG.error("Invalid configuration: %s" % config)
raise TypeError
@staticmethod
def load(config=None):
RemoteConfiguration.validate(config)
update = config.get("server", {}).get("update")
if update:
try:
from mycroft.api import DeviceApi
api = DeviceApi()
setting = api.find_setting()
location = api.find_location()
if location:
setting["location"] = location
RemoteConfiguration.__load(config, setting)
RemoteConfiguration.__store_cache(setting)
except Exception as e:
LOG.warning("Failed to fetch remote configuration: %s" %
repr(e))
RemoteConfiguration.__load_cache(config)
else:
LOG.debug("Remote configuration not activated.")
return config
@staticmethod
def __load(config, setting):
for k, v in setting.iteritems():
if k not in RemoteConfiguration.IGNORED_SETTINGS:
# Translate the CamelCase values stored remotely into the
# Python-style names used within mycroft-core.
key = inflection.underscore(re.sub(r"Setting(s)?", "", k))
if isinstance(v, dict):
config[key] = config.get(key, {})
RemoteConfiguration.__load(config[key], v)
elif isinstance(v, list):
if key not in config:
config[key] = {}
RemoteConfiguration.__load_list(config[key], v)
else:
config[key] = v
@staticmethod
def __store_cache(setting):
"""
Cache the received settings locally. The cache will be used if
the remote is unreachable to load settings that are as close
to the user's as possible
"""
config = {}
# Remove server specific entries
RemoteConfiguration.__load(config, setting)
with open(RemoteConfiguration.WEB_CONFIG_CACHE, 'w') as f:
json.dump(config, f)
@staticmethod
def __load_cache(config):
"""
Load cache from file
"""
LOG.info("Using cached configuration if available")
ConfigurationLoader.load(config,
[RemoteConfiguration.WEB_CONFIG_CACHE],
False)
@staticmethod
def __load_list(config, values):
for v in values:
module = v["@type"]
if v.get("active"):
config["module"] = module
config[module] = config.get(module, {})
RemoteConfiguration.__load(config[module], v)
class ConfigurationManager(object):
"""
Static management utility for accessing the cached configuration.
This configuration is periodically updated from the remote server
to keep in sync.
"""
__config = None
__listener = None
@staticmethod
def instance():
"""
The cached configuration.
Returns:
dict: A dictionary representing the Mycroft configuration
"""
return ConfigurationManager.get()
@staticmethod
def init(ws):
# Start listening for configuration update events on the messagebus
ConfigurationManager.__listener = _ConfigurationListener(ws)
@staticmethod
def load_defaults():
for location in load_order:
LOG.info("Loading configuration: " + location)
if location == REMOTE_CONFIG:
RemoteConfiguration.load(ConfigurationManager.__config)
else:
ConfigurationManager.__config = ConfigurationLoader.load(
ConfigurationManager.__config, [location])
return ConfigurationManager.__config
@staticmethod
def load_local(locations=None, keep_user_config=True):
return ConfigurationLoader.load(ConfigurationManager.get(), locations,
keep_user_config)
@staticmethod
def load_internal(config):
LOG.info("Updating config internally")
ConfigurationManager.update(config)
@staticmethod
def load_remote():
if not ConfigurationManager.__config:
ConfigurationManager.__config = ConfigurationLoader.load()
return RemoteConfiguration.load(ConfigurationManager.__config)
@staticmethod
def get(locations=None):
"""
Get cached configuration.
Returns:
dict: A dictionary representing the Mycroft configuration
"""
if not ConfigurationManager.__config:
ConfigurationManager.load_defaults()
if locations:
ConfigurationManager.load_local(locations)
return ConfigurationManager.__config
@staticmethod
def update(config):
"""
Update cached configuration with the new ``config``.
"""
if not ConfigurationManager.__config:
ConfigurationManager.load_defaults()
if config:
ConfigurationManager.__config.update(config)
@staticmethod
def save(config, is_system=False):
"""
Save configuration ``config``.
"""
ConfigurationManager.update(config)
location = SYSTEM_CONFIG if is_system else RUNTIME_CONFIG
try:
LOG.info("Saving config: " + location)
dir = location.replace("/jarbas_runtime.conf", "").replace("/jarbas.conf", "")
if not exists(dir):
mkdir(dir)
try:
loc_config = load_commented_json(location)
except:
loc_config = {}
with open(location, 'w') as f:
# dict.update() returns None; merge into loc_config and dump the merged result
loc_config.update(config)
json.dump(loc_config, f)
except Exception as e:
LOG.error(e)
class _ConfigurationListener(object):
""" Utility to synchronize remote configuration changes locally
This listens to the messagebus for
'configuration.updated', and refreshes the cached configuration when this
is encountered.
'configuration.update', and updates the cached configuration when this
is encountered.
"""
def __init__(self, ws):
super(_ConfigurationListener, self).__init__()
ws.on("configuration.updated", self.updated)
ws.on("configuration.patch", self.patch)
@staticmethod
def updated(message):
"""
Event handler for configuration updated events. Forces a reload
of all configuration sources.
Args:
message: message bus message structure
"""
ConfigurationManager.load_defaults()
@staticmethod
def patch(message):
"""
Event handler for configuration update events.
Update config with provided data
Args:
message: message bus message structure
"""
config = message.data.get("config", {})
ConfigurationManager.load_internal(config)
| JarbasAI/JarbasAI | mycroft/configuration/__init__.py | Python | gpl-3.0 | 12,270 |
from .spike_detection import highest_spike
class ContentExtractor:
def __init__(self, config):
self.config = config
def extract(self, article):
text_lines = article.rawtext.split('\n')
line_values = [len(line) for line in text_lines]
l, r, _ = highest_spike(line_values, self.config.rm_soft_period, self.config.rm_hard_period)
selected_lines = text_lines[l:r]
article.content = "\n".join(selected_lines)
return | villasv/pyWebNectar | webnectar/extractors/ContentExtractor.py | Python | gpl-3.0 | 493 |
#!/usr/bin/env python
"""
gets basic info about AVI file using OpenCV
input: filename or cv2.Capture
"""
from pathlib import Path
from struct import pack
from typing import Dict, Any
import cv2
def getaviprop(fn: Path) -> Dict[str, Any]:
if isinstance(fn, (str, Path)): # assuming filename
fn = Path(fn).expanduser()
if not fn.is_file():
raise FileNotFoundError(fn)
v = cv2.VideoCapture(str(fn))
if v is None:
raise OSError(f'could not read {fn}')
else: # assuming cv2.VideoCapture object
v = fn
if not v.isOpened():
raise OSError(f'cannot read {fn} probable codec issue')
vidparam = {
'nframe': int(v.get(cv2.CAP_PROP_FRAME_COUNT)),
'xy_pixel': (
int(v.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(v.get(cv2.CAP_PROP_FRAME_HEIGHT)),
),
'fps': v.get(cv2.CAP_PROP_FPS),
'codec': fourccint2ascii(int(v.get(cv2.CAP_PROP_FOURCC))),
}
if isinstance(fn, Path):
v.release()
return vidparam
def fourccint2ascii(fourcc_int: int) -> str:
"""
convert fourcc 32-bit integer code to ASCII
"""
assert isinstance(fourcc_int, int)
return pack('<I', fourcc_int).decode('ascii')
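# Illustrative example: 0x47504A4D packs little-endian to the bytes
# 'M', 'J', 'P', 'G', so fourccint2ascii(0x47504A4D) == 'MJPG'.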
if __name__ == '__main__':
from argparse import ArgumentParser
p = ArgumentParser(description='get parameters of AVI file')
p.add_argument('avifn', help='avi filename')
p = p.parse_args()
vidparam = getaviprop(p.avifn)
print(vidparam)
| scienceopen/CVutils | morecvutils/getaviprop.py | Python | mit | 1,520 |
#!/usr/bin/env python3
'''
KLL Data Dropper (Doesn't emit anything)
'''
# Copyright (C) 2016-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
from kll.common.emitter import Emitter
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
### Classes ###
class Drop(Emitter):
'''
Doesn't emit at all, just ignores everything
'''
def __init__(self, control):
'''
Emitter initialization
@param control: ControlStage object, used to access data from other stages
'''
Emitter.__init__(self, control)
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
def output(self):
'''
Final Stage of Emitter
Nothing to do
'''
def process(self):
'''
Emitter Processing
Nothing to do, just dropping all the results
'''
| kiibohd/kll | kll/emitters/none/none.py | Python | gpl-3.0 | 1,805 |
from django.conf.urls import include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', 'site.views.home', name='home'),
# url(r'^site/', include('site.foo.urls')),
#(r'^cache/', include('django_memcached.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^mediaviewer/', include('mediaviewer.urls', namespace='mediaviewer')),
]
| kyokley/MediaViewer | mysite/urls.py | Python | mit | 658 |
#!/usr/bin/env python
#************************************************************************
# Compilation: javac ThreeSum.java
# Execution: java ThreeSum input.txt
# Dependencies: In.java StdOut.java Stopwatch.java
# Data files: http://algs4.cs.princeton.edu/14analysis/1Kints.txt
# http://algs4.cs.princeton.edu/14analysis/2Kints.txt
# http://algs4.cs.princeton.edu/14analysis/4Kints.txt
# http://algs4.cs.princeton.edu/14analysis/8Kints.txt
# http://algs4.cs.princeton.edu/14analysis/16Kints.txt
# http://algs4.cs.princeton.edu/14analysis/32Kints.txt
# http://algs4.cs.princeton.edu/14analysis/1Mints.txt
#
#  A program with cubic running time. Read in N integers
# and counts the number of triples that sum to exactly 0
# (ignoring integer overflow).
#
# % java ThreeSum 1Kints.txt
# 70
#
# % java ThreeSum 2Kints.txt
# 528
#
# % java ThreeSum 4Kints.txt
# 4039
#
#************************************************************************/
#*
# The <tt>ThreeSum</tt> class provides static methods for counting
# and printing the number of triples in an array of integers that sum to 0
# (ignoring integer overflow).
# <p>
# This implementation uses a triply nested loop and takes proportional to N^3,
# where N is the number of integers.
# <p>
# For additional documentation, see <a href="http://algs4.cs.princeton.edu/14analysis">Section 1.4</a> of
# <i>Algorithms, 4th Edition</i> by Robert Sedgewick and Kevin Wayne.
#
# @author Robert Sedgewick
# @author Kevin Wayne
#
#/
####################################################################
# Lecture Week 1 Observations (10:05)
####################################################################
#
# 3-SUM: Given N distinct integers, how many triples sum to exactly zero?
#
# a = [30 -40 -20 -10 40 0 10 5]
#
# a[i] a[j] a[k] sum
# --- ---- ---- ---
# 1 30 -40 10 0
# 2 30 -20 -10 0
# 3 -40 40 0 0
# 4 -10 0 10 0
#
# CONTEXT: Deeply related to problems in computational geometry.
# graphics, movies, etc.
####################################################################
# Lecture Week 1 Mathematical Models (12:48)
####################################################################
#
# 03:10 COST OF BASIC OPERATIONS
# variable declaration int a c1
# assignment statement a = b c2
# integer compare a < b c3
# array element access a[i] c4
# array length a.length c5
# 1D array allocation new int[N] c6*N
# 2D array allocation new int[N][N] c7*N^2
# string length s.length c8
# substring extraction s.substring(N/2, N) c9
# string concatenation s + t c10*N
#
# NOVICE MISTAKE. Abusive string concatenation.
# If you concatenate 2 strings, running time is proportional to length of string.
# 03:56 HOW MANY INSTRUCTIONS AS A FUNCTION OF INPUT SIZE N?
#
# int count = 0;
# for (int i = 0; i<N; i++)
# if (a[i] == 0)
# count++;
#
# operation freq code
# ---------------------- ----- --------
# variable declaration 2 i, cnt
# assignment statement 2 i=0, cnt=0
# less than compare N + 1 i<N
# equal to compare N a[i] == 0
# array access N a[i]
# increment N to 2 N i inc N times. cnt inc 0 to N times (dep. on input data)
# 05:03 EXAMPLE 2-SUM: HOW MANY INSTRUCTIONS AS A FUNCTION OF INPUT SIZE N?
#
# int count = 0;
# for (int i = 0; i< N;i++)
# for (int j = i+1; j < N; j++)
# if (a[i] + a[j] == 0)
# count++;
#
# 09:12 BOTTOM LINE. Use COST MODEL and TILDE NOTATION to simplify counts.
# ANSWER: ~N^2 array accesses.
#
# if (a[i] + a[j] == 0): (N) <- N choose 2
# 0 + 1 + 2 + ... + (N-1) = 1/2(N)(N-1) = (2)
#
# operation freq tilde code
# ---------------------- ----- -------- ----------
# 0 variable declaration N + 2 ~N i, cnt
# 1 assignment statement N + 2 ~N i=0, cnt=0
# *2 less than compare 1/2(N+1)(N+2) ~1/2N^2 i<N
# *3 equal to compare 1/2(N)(N-1) ~1/2N^2 a[i] == 0
# *4 array access N(N-1) ~N^2 a[i]
# *5 increment 1/2(N)(N-1) to N(N-1) 1/2N^2 to ~N^2
# *2-*5 are tedious to compute
#
# 7:08 So use the operation that is either/both:
# * the most expensive
# * the most frequent
#
# SIMPLIFICATION 1: Choose array accesses as most important to count
# 07:20 SIMPLIFICATION 2: TILDE NOTATION (Ignore low order terms in derived functions)
#
# * Estimate running time (or memory) as a function of input size N.
# * Ignore lower order terms.
# - when N is large, terms are negliglible
# - when N is small, we don't care
#
# EX1: 1/6N^3 + 20N + 16 ~ 1/6N^3
# EX2: 1/6N^3 + 100N^(4/3) + 56 ~ 1/6N^3
# EX3: 1/6N^3 - 1/2N^2 + 1/3N ~ 1/6N^3
# 08:12 TECHNICAL DEFINITION.
#
# f(N) ~ g(N) means lim f(N)
# N->Inf ---- = 1
# g(N)
# 10:00 APPROXIMATELY how many ARRAY ACCESSES as a function of input size N
# for ThreeSum:
#
# if (a[i] + a[j] + a[k] == 0)
#
# /N\ = N(N-1)(N-2) 1
# \3/ ----------- ~ - N^3
# 3! 6
#
# ANSWER: THREESUM has 1/3N^3 array accesses
# 11:31 ESTIMATING A DISCRETE SUM: Replacing a discrete sum w/an integral:
#
# EX1: 1 + 2 +...+ N. SUM(i=1:N) ~ Integral(x=1:N)[x dx] ~ 1/2 N^2
# EX2: 1 + 1/2 + 1/3 +...+ 1/N SUM(i=1:N) ~ Insegral(x=1:N)[1/x dx] ~ ln N
# EX3: 3-sum triple loop. SUM(i=1:N)SUM(y=x:N)SUM(z=y:N)dz dy dx ~ 1/6 N^3
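# A quick numeric illustration of these approximations (rough check, not from
# the lecture):
#   >>> N = 1000
#   >>> sum(range(1, N + 1))                   # 500500, vs 1/2 N^2 = 500000
#   >>> sum(1.0 / i for i in range(1, N + 1))  # ~7.485, vs ln(1000) ~ 6.908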
# 11:45 MATHEMATICAL MODELS FOR RUNNING TIME
#
# IN PRINCIPLE. accurate mathematical models are available.
#
# IN PRACTICE.
# * Formulas can be complicated.
# * Advanced mathematics might be required.
# * Exact models best left for experts.
#
# T(N) = c1*A + c2*B + c3*C + c4*D + c5*E
# A = array access
# B = integer add
# C = integer compare
# D = increment
# E = variable assignment
# cN depends on machine, compiler
# A..E frequencies: depend on algorithm, input
#
# BOTTOM LINE. We use APPROXIMATE models in this course: T(N) ~ cN^3
# 12:42 QUESTION: How many array accesses does the following code fragment
# make as a function of N? (Assume the compiler does not optimize away
# accesses in the innermost loop.)
#
# int sum = 0;
# for (int i = 0; i < N; i++)
# for (int j = i+1; j < N; j++)
# for (int k = 1; k < N; k = k*2)
# if (a[i] + a[j] >= a[k])
# sum++;
#
# ANSWER: ~3/2*N^2*lg(N)
#
#  EXPLANATION: Not all triple loops have cubic running times. For a given
# value of i and j, the k-loop requires only 3*lg(N) array access: the
# body is executed lg(N) times and each time involves 3 array accesses.
# As in the 2-SUM and 3-SUM analysis, the number of times the k-loop
# is executed is (N choose 2) ~ 1/2 N^2
####################################################################
# 01:45 Lecture Week 1 "Order-of-Growth Classifications (14:39)
####################################################################
# constant 1
# logarithmic log N
# linear N
# linearithmic N log N
# quadratic N^2
# cubic N^3
# exponential 2^N
####################################################################
# 01:45 Lecture Week 1 "Theory of Algorithms"
####################################################################
#
# EX 1. Array accesses for brute-force 3-SUM.
# BEST. ~ 1/2 N^2
# AVERAGE. ~ 1/2 N^2
# WORST. ~ 1/2 N^2
# 02:56 TYPES OF ANALYSES
#
# BEST CASE. Lower bound on cost.
# WORST CASE. Upper bound on cost.
# AVERAGE CASE. "Expected" cost.
#
# ACTUAL DATA MIGHT NOT MATCH INPUT MODEL?
# * Need to understand input to effectively process it.
# * Approach 1: design for the worst case.
# * Approach 2: randomize, depend on probabilistic guarantee.
# 02:58 51 GOALS.
# * Establish "difficulty" of a problem.
# * Develop "optimal" algorithms.
#
# APPROACH
# * Suppress details in analysis: analyze "to within a constant factor".
# * Eliminate variability in input model by focusing on the worst case.
#
# OPTIMAL ALGORITHM
# * Performance guarantee (to within a constant factor) for any input.
# * No algorithm can provide a better performance guarantee.
# 04:36 COMMONLY-USED NOTATIONS IN THE THEORY OF ALGORITHMS
#
# NOTATION PROVIDES EXAMPLE SHORTHAND FOR USED TO
# --------- --------------- ---------- ----------------------- -------
# Big Theta asymptotic theta(N^2) 1/2 N^2 classify
# order of growth 10 N^2 algorithms
# 5 N^2 + 22 N log N + 3N
#
# Big Oh theta(N^2) Oh(N^2) 10 N^2 develop
# and smaller 100 N^2 upper bounds
# 22 N log N + 3N
#
# Big Omega theta(N^2) omega(N^2) 1/2 N^2 develop
# and larger N^5 lower bounds
# N^3 + 22 N log N + 3 N
#
#  Tilde      Leading term     ~10 N^2     10 N^2                   provide
# 11:14 10 N^2 + 22 N log N approximate
# 10 N^2 + 2 N + 37 model
#
# COMMON MISTAKE: Interpreting big-Oh as an approximate model.
# THIS COURSE: Focus on approximate models: use Tilde-notation
# 11:28 LECTURE QUESTION: Which of the following functions is O(N^3)?
#
# 11 N + 15 lg N + 100
# 1/3 N^2
# 25,000 N^3
#
# ANSWER: ALL OF THE ABOVE
# EXPLANATION: Recall that big-Oh notation provides only an upper bound on the
# growth rate of a function as N gets large. In this course, we primarily
# use tilde notation because it more accurately describes the function -- it
# provides both an upper and lower bound on the function as well as
# the coefficient of the leading term.
#
# 07:02 53 THEORY OF ALGORITHMS: EXAMPLE 1
#
# EX: 1-Sum = "Is there a 0 in the array"
#
# UPPER BOUND. A specific algorithm.
# * Ex. Brute-force algorithm for 1-Sum: Look at every array entry.
# * Running time of the optimal algorithm for 1-Sum is O(N)
#
# LOWER BOUND. Proof that no algorithm can do better.
# * Ex. Have to examine all N entries (any unexamined one might be 0).
# * Running time of the optimal algorithm for 1-Sum is omega(N)
#
# OPTIMAL ALGORITHM.
# * Lower bound equals upper bound (to within a constant factor).
# * Ex. Brute-force algorithm for 1-Sum is optimal: its running time is theta(N).
# 07:38 55 THEORY OF ALGORITHMS: EXAMPLE 2
#
# EX: 3-Sum
#
# UPPER BOUND. A specific algorithm.
# * Ex. Improved algorithm for 3-Sum
# * Running time of the optimal algorithm for 3-Sum is O(N^2 log N)
#
# LOWER BOUND. Proof that no algorithm can do better.
# * Ex. Have to examine all N entries to solve 3-Sum.
# * Running time of the optimal algorithm for 3-Sum is omega(N)
#
# OPTIMAL ALGORITHM.
# * Optimal algorithm for 3-Sum?
# * Subquadratic algorithm or quadratic lower bound for 3-Sum?
# Do not know.
# Do not know if alg is < O(N^2)
# 08:42 56 ALGORITHM DESIGN APPROACH
#
# START
# * Develop an algorithm
# * Prove a lower bound
#
# GAP?
# * Lower the upper bound (discover a new algorithm)
# * Raise the lower bound (more difficult)
#
# GOLDEN AGE OF ALGORITHM DESIGN
# * 1970s-.
# * Steadily decreasing upper bounds for many important problems.
# * Many known optimal algorithms.
#
# CAVEATS.
# * Overly pessimistic to focus on worst case?
# * Need closer analysis than "to within a constant factor" to predict performance.
#
####################################################################
# Lecture Week 1 "Memory" (08:11)
####################################################################
# BASICS
# BIT. 0 or 1
# BYTE. 8 bits
# MEGABYTE (MB). 1 million or 2^20 bytes
# GIGABYTE (GB). 1 billion or 2^30 bytes
# TYPICAL MEMORY USAGE FOR PRIMITIVE TYPES
# type bytes
# ------- -----
# boolean 1
# byte 1
# char 2
# int 4
# float 4
# long 8
# double 8
# 02:42 TYPICAL MEMORY USAGE FOR ARRAYS
# type bytes
# --------- --------
# char[] 2N + 24
# int[] 4N + 24
# double[] 8N + 24
# 03:00 FOR TWO DIMENSIONAL ARRAYS
# type bytes
# --------- --------
# char[][] ~ 2 M N
# int[][] ~ 4 M N
# double[][] ~ 8 M N
# 03:42 TYPICAL USAGE FOR OBJECTS IN JAVA
# OBJECT OVERHEAD. 16 bytes
# OBJECT REFERENCE. 8 bytes
# OBJECT. 16 bytes + memory for each instance variable
# +8 if inner class (for pointer to enclosing class).
# PADDING. Each object uses a multiple of 8 bytes.
# 05:28
# SHALLOW MEMORY USAGE: Don't count referenced objects.
# DEEP MEMORY USAGE: If array entry or instance is a reference,
# add memory (recursively) for referenced object.
# 04:00 EX 1 DATE OBJECT
#
# public class Date // 16 bytes (object overhead)
# {
# private int day; // 4 bytes (int)
# private int month; // 4 bytes (int)
# private int year; // 4 bytes (int)
# ... //----------------
# } // 4 bytes (padding)
# //----------------
# // 32 bytes
# 04:29 EX 2 A VIRGIN STRING OF LENGTH N USES ~2N BYTES OF MEMORY
#
# public class String // 16 bytes (object overhead)
# {
# private char[] value; // 8 bytes (reference to array)
# // 2N + 24 bytes (char[] array)
# private int offset; // 4 bytes (int)
# private int count; // 4 bytes (int)
# private int hash; // 4 bytes (int)
# ... //----------------
# } // 4 bytes (padding)
# //----------------
# // 2N + 64 bytes
# 06:00 public class WeightedQuickUnionUF { // 16 bytes (object overhead)
# private int[] id; // 4N + 24 bytes (int[] array)
# // 8 bytes (reference to array)
# private int[] sz; // 4N + 24 bytes (int[] array)
# // 8 bytes (reference to array)
# private int count; // 4 bytes (int)
# -----------
# // 8N + 84
# // + 4 bytes padding
from AlgsSedgewickWayne.testcode.InputArgs import get_ints_from_file
import sys
import itertools
import timeit
import datetime
# Returns the number of triples (i, j, k) with i < j < k such that a[i] + a[j] + a[k] == 0.
# @param a the array of integers
# @return the number of triples (i, j, k) with i < j < k such that a[i] + a[j] + a[k] == 0
# http://stackoverflow.com/questions/25712596/why-is-the-map-version-of-threesum-so-slow/25717916#25717916
def count_slow(a, prt=False): # initial translate of Java (super slow)
"""ThreeSum: Given N distinct integers, how many triples sum to exactly zero?"""
print("RUNNING count_slow...")
N = len(a)
cnt = 0
for i in range(N):
for j in range(i+1, N):
for k in range(j+1, N):
if sum([a[i], a[j], a[k]]) == 0:
cnt += 1
if prt:
sys.stdout.write('{:7d} + {:7d} + {:7d}\n'.format(a[i], a[j], a[k]))
return cnt
def count_itertools(a): # written by Ashwini Chaudhary
"""ThreeSum using itertools"""
print("RUNNING count_itertools, written by Ashwini Chaudhary...")
return sum((1 for x in itertools.combinations(a, r=3) if not sum(x)))
def count_itertools_faster(a): # written by Veedrak/modified (fastest)
print("RUNNING count_itertools (faster), written by Veedrak(modified)...")
return sum(1 for x, y, z in itertools.combinations(a, r=3) if x+y==-z)
def count_fixed(a): # written by roippi
print("RUNNING count_fixed, written by roippi...")
N = len(a)
cnt = 0
for i in range(N):
for j in range(i+1, N):
for k in range(j+1, N):
if a[i] + a[j] + a[k] == 0:
cnt += 1
return cnt
def count_enumerate(a): # written by roippi
print("RUNNING count_enumerate, written by roippi...")
cnt = 0
for i, x in enumerate(a):
for j, y in enumerate(a[i+1:], i+1):
for z in a[j+1:]:
if x + y + z == 0:
cnt += 1
return cnt
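# The lecture notes near the top of this file ask whether 3-Sum can be solved
# faster than the brute-force O(N^3) scan. Below is a hedged sketch of the
# classic O(N^2) sort + two-pointer variant; it assumes the input integers are
# distinct (as the docstrings above state) and is an illustration added here,
# not part of the original booksite code.
def count_two_pointer(a):
  """ThreeSum in ~O(N^2): sort, then sweep a pair of pointers for each anchor."""
  print("RUNNING count_two_pointer...")
  srt = sorted(a)
  N = len(srt)
  cnt = 0
  for i in range(N - 2):
    lo, hi = i + 1, N - 1
    while lo < hi:
      s = srt[i] + srt[lo] + srt[hi]
      if s == 0:          # found a zero-sum triple anchored at srt[i]
        cnt += 1
        lo += 1
        hi -= 1
      elif s < 0:         # sum too small: advance the low pointer
        lo += 1
      else:               # sum too large: retreat the high pointer
        hi -= 1
  return cnt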
# --------------------------------------------------------------------------------------
# Reads in a sequence of integers from a file, specified as a command-line argument;
# counts the number of triples that sum to exactly zero; prints out the time to perform
# the computation.
def run_timed(a, cnt_fnc=count_enumerate):
"""Run ThreeSum and report the elapsed time."""
tic = timeit.default_timer()
cnt = cnt_fnc(a)
sys.stdout.write('ThreeSum found {} times when run_timed on {} integers\n'.format(cnt, len(a)))
sys.stdout.write("Elapsed HMS: {}\n".format(
str(datetime.timedelta(seconds=(timeit.default_timer()-tic)))))
def run_timed_fin(fin, cnt_fnc=count_enumerate):
"""Run ThreeSum using integers stored in a column in a file."""
sys.stdout.write('\nRunning ThreeSum on data in: {}\n'.format(fin))
run_timed(get_ints_from_file(fin), cnt_fnc)
def run_timed_fins(fins):
"""Run ThreeSum on multiple files containing integers."""
for fin in fins:
run_timed_fin(fin)
if __name__ == '__main__':
import os
from random import randrange
# If there are no arguments, run 1 example using a few different Python algorithms
# to show different ways to implement the O(N^3) algorithm in Python
if len(sys.argv) == 1:
run_timed_fin('../../thirdparty/1Kints.txt', count_slow)
run_timed_fin('../../thirdparty/1Kints.txt', count_itertools)
run_timed_fin('../../thirdparty/1Kints.txt', count_itertools_faster)
run_timed_fin('../../thirdparty/1Kints.txt', count_fixed)
run_timed_fin('../../thirdparty/1Kints.txt', count_enumerate)
# Run all the examples from the Princeton Algorithms book-site
elif sys.argv[1] == 'all':
fins = [
'../../thirdparty/1Kints.txt',
'../../thirdparty/2Kints.txt',
'../../thirdparty/4Kints.txt',
'../../thirdparty/8Kints.txt']
run_timed_fins(fins)
# If the argument is a file, run using the integers from that file
elif os.path.isfile(sys.argv[1]):
run_timed_fin(sys.argv[1])
  # If the argument is a number, run using that many randomly chosen integers.
elif sys.argv[1].isdigit():
dig = int(sys.argv[1])
a = [randrange(-2*dig, 2*dig) for i in range(dig)]
run_timed(a)
# Copyright (C) 2002-2010, Robert Sedgewick and Kevin Wayne.
# Java Last updated: Tue Sep 24 09:27:51 EDT 2013.
| dvklopfenstein/PrincetonAlgorithms | py/AlgsSedgewickWayne/ThreeSum.py | Python | gpl-2.0 | 18,990 |
# make the other metrics work
# generate the txt files, then work on the pdf output
__version__ = "0.1.0"
import numpy as np
import pandas as pd
# import matplotlib
# matplotlib.use('pdf')
import matplotlib.pyplot as plt
import sys
import os
import networkx as nx
import PHRG
import probabilistic_cfg as pcfg
import net_metrics as metrics
import load_edgelist_from_dataframe as tdf
import pprint as pp
import argparse, traceback
DBG = False
def get_parser ():
  parser = argparse.ArgumentParser(description='exact_phrg: infer a model given a graph (derive a model)')
parser.add_argument('g_fname', metavar='G_FNAME', nargs=1, help='Filename of edgelist graph')
parser.add_argument('--chunglu', help='Generate chunglu graphs',action='store_true')
parser.add_argument('--kron', help='Generate Kronecker product graphs',action='store_true')
parser.add_argument('--version', action='version', version=__version__)
return parser
def Hstar_Graphs_Control (G, graph_name, axs):
print '-',Hstar_Graphs_Control,'-'
  # Derive the production rules in a naive way, ignoring any temporal information
prod_rules = PHRG.probabilistic_hrg_learning(G)
print prod_rules
g = pcfg.Grammar('S')
for (id, lhs, rhs, prob) in prod_rules:
g.add_rule(pcfg.Rule(id, lhs, rhs, prob))
num_nodes = G.number_of_nodes()
print "Starting max size", 'n=', num_nodes
g.set_max_size(num_nodes)
print "Done with max size"
Hstars = []
num_samples = 20
print '*' * 40
for i in range(0, num_samples):
rule_list = g.sample(num_nodes)
hstar = PHRG.grow(rule_list, g)[0]
Hstars.append(hstar)
# if 0:
# g = nx.from_pandas_dataframe(df, 'src', 'trg', edge_attr=['ts'])
# draw_degree_whole_graph(g,axs)
# draw_degree(Hstars, axs=axs, col='r')
# #axs.set_title('Rules derived by ignoring time')
# axs.set_ylabel('Frequency')
# axs.set_xlabel('degree')
if 1:
# metricx = [ 'degree','hops', 'clust', 'assort', 'kcore','eigen','gcd']
metricx = ['degree', 'gcd']
# g = nx.from_pandas_dataframe(df, 'src', 'trg',edge_attr=['ts'])
# graph_name = os.path.basename(f_path).rstrip('.tel')
if DBG: print ">", graph_name
metrics.network_properties([G], metricx, Hstars, name=graph_name, out_tsv=True)
def pandas_dataframes_from_edgelists (el_files):
if (el_files is None): return
list_of_dataframes = []
for f in el_files:
print '~' * 80
print f
temporal_graph = False
with open(f, 'r') as ifile:
line = ifile.readline()
while (not temporal_graph):
if ("%" in line):
line = ifile.readline()
elif len(line.split()) > 3:
temporal_graph = True
if (temporal_graph):
dat = np.genfromtxt(f, dtype=np.int64, comments='%', delimiter="\t", usecols=[0, 1, 3], autostrip=True)
df = pd.DataFrame(dat, columns=['src', 'trg', 'ts'])
else:
dat = np.genfromtxt(f, dtype=np.int64, comments='%', delimiter="\t", usecols=[0, 1], autostrip=True)
df = pd.DataFrame(dat, columns=['src', 'trg'])
df = df.drop_duplicates()
list_of_dataframes.append(df)
return list_of_dataframes
def grow_exact_size_hrg_graphs_from_prod_rules(prod_rules, gname, n, runs=1):
"""
Args:
    prod_rules: production rules (model)
gname: graph name
n: target graph order (number of nodes)
runs: how many graphs to generate
Returns: list of synthetic graphs
"""
if n <=0: sys.exit(1)
print runs
print n
print gname
for i,x in enumerate(prod_rules):
print i,' ', x[:1]
g = pcfg.Grammar('S')
for (id, lhs, rhs, prob) in prod_rules:
g.add_rule(pcfg.Rule(id, lhs, rhs, prob))
print '... pcfg.Grammar'
g.set_max_size(n)
print "Done with max size"
if DBG: print '*' * 40
hstars_lst = []
for i in range(0, runs):
rule_list = g.sample(n)
print 'g.sample'
hstar = PHRG.grow(rule_list, g)[0]
hstars_lst.append(hstar)
return hstars_lst
def pwrlaw_plot (xdata, ydata, yerr):
from scipy import linspace, randn, log10, optimize, sqrt
powerlaw = lambda x, amp, index: amp * (x**index)
logx = log10(xdata)
logy = log10(ydata)
logyerr = yerr / ydata
# define our (line) fitting function
fitfunc = lambda p, x: p[0] + p[1] * x
errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
pinit = [1.0, -1.0]
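  # Fitting logy = p[0] + p[1]*logx linearizes the power law y = amp * x**index,
  # so amp = 10**p[0] and index = p[1]; dividing yerr by ydata above approximates
  # the error in log space (up to a constant ln(10) factor).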
out = optimize.leastsq(errfunc, pinit,
args=(logx, logy, logyerr), full_output=1)
pfinal = out[0]
covar = out[1]
print pfinal
print covar
index = pfinal[1]
amp = 10.0**pfinal[0]
  indexErr = sqrt( covar[1][1] )
  ampErr = sqrt( covar[0][0] ) * amp
print index
# ########
# plotting
# ########
# ax.plot(ydata)
# ax.plot(pl_sequence)
fig, axs = plt.subplots(2,1)
axs[0].plot(xdata, powerlaw(xdata, amp, index)) # Fit
axs[0].errorbar(xdata, ydata, yerr=yerr, fmt='k.') # Data
(yh1,yh2) = (axs[0].get_ylim()[1]*.9, axs[0].get_ylim()[1]*.8)
xh = axs[0].get_xlim()[0]*1.1
print axs[0].get_ylim()
print (yh1,yh2)
axs[0].text(xh, yh1, 'Ampli = %5.2f +/- %5.2f' % (amp, ampErr))
axs[0].text(xh, yh2, 'Index = %5.2f +/- %5.2f' % (index, indexErr))
axs[0].set_title('Best Fit Power Law')
axs[0].set_xlabel('X')
axs[0].set_ylabel('Y')
# xlim(1, 11)
#
# subplot(2, 1, 2)
axs[1].loglog(xdata, powerlaw(xdata, amp, index))
axs[1].errorbar(xdata, ydata, yerr=yerr, fmt='k.') # Data
axs[1].set_xlabel('X (log scale)')
axs[1].set_ylabel('Y (log scale)')
import datetime
figfname = datetime.datetime.now().strftime("%d%b%y")+"_pl"
plt.savefig(figfname, bbox_inches='tight')
return figfname
def deg_vcnt_to_disk(orig_graph, synthetic_graphs):
df = pd.DataFrame(orig_graph.degree().items())
gb = df.groupby([1]).count()
# gb.to_csv("Results/deg_orig_"+orig_graph.name+".tsv", sep='\t', header=True)
gb.index.rename('k',inplace=True)
gb.columns=['vcnt']
gb.to_csv("Results/deg_orig_"+orig_graph.name+".tsv", sep='\t', header=True)
# ## - group of synth graphs -
deg_df = pd.DataFrame()
for g in synthetic_graphs:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1]).count()
# Degree vs cnt
deg_df = pd.concat([deg_df, gb], axis=1) # Appends to bottom new DFs
# print gb
deg_df['mean'] = deg_df.mean(axis=1)
deg_df.index.rename('k',inplace=True)
deg_df['mean'].to_csv("Results/deg_xphrg_"+orig_graph.name+".tsv", sep='\t', header=True)
def plot_g_hstars(orig_graph, synthetic_graphs):
df = pd.DataFrame(orig_graph.degree().items())
gb = df.groupby([1]).count()
# gb.to_csv("Results/deg_orig_"+orig_graph.name+".tsv", sep='\t', header=True)
gb.index.rename('k',inplace=True)
gb.columns=['vcnt']
# k_cnt = [(x.tolist(),y.values[0]) for x,y in gb.iterrows()]
xdata = np.array([x.tolist() for x,y in gb.iterrows()])
ydata = np.array([y.values[0] for x,y in gb.iterrows()])
yerr = ydata *0.000001
fig, ax = plt.subplots()
ax.plot(gb.index.values, gb['vcnt'].values,'-o', markersize=8, markerfacecolor='w', markeredgecolor=[0,0,1], alpha=0.5, label="orig")
ofname = pwrlaw_plot(xdata, ydata,yerr)
  if os.path.exists(ofname): print '... Plot saved to:', ofname
deg_df = pd.DataFrame()
for g in synthetic_graphs:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1]).count()
# Degree vs cnt
deg_df = pd.concat([deg_df, gb], axis=1) # Appends to bottom new DFs
# print gb
deg_df['mean'] = deg_df.mean(axis=1)
deg_df.index.rename('k',inplace=True)
# ax.plot(y=deg_df.mean(axis=1))
# ax.plot(y=deg_df.median(axis=1))
# ax.plot()
# orig
deg_df.mean(axis=1).plot(ax=ax,label='mean',color='r')
deg_df.median(axis=1).plot(ax=ax,label='median',color='g')
ax.fill_between(deg_df.index, deg_df.mean(axis=1) - deg_df.sem(axis=1),
deg_df.mean(axis=1) + deg_df.sem(axis=1), alpha=0.2, label="se")
# ax.plot(k_cnt)
# deg_df.plot(ax=ax)
# for x,y in k_cnt:
# if DBG: print "{}\t{}".format(x,y)
#
#
# for g in synths:
# df = pd.DataFrame(g.degree().items())
# gb = df.groupby([1]).count()
# # gb.plot(ax=ax)
# for x,y in k_cnt:
# if DBG: print "{}\t{}".format(x,y)
#
# # Curve-fit
#
plt.savefig('tmpfig', bbox_inches='tight')
def get_hrg_production_rules(edgelist_data_frame, graph_name):
from growing import derive_prules_from
df = edgelist_data_frame
try:
G = nx.from_pandas_dataframe(df, 'src', 'trg', ['ts']) # whole graph
except Exception, e:
print '==========================\n\t',
print str(e)
traceback.print_exc()
G = nx.from_pandas_dataframe(df, 'src', 'trg')
# os._exit(1)
G.name = graph_name
prules = derive_prules_from([G])
# Synthetic Graphs
hStars = grow_exact_size_hrg_graphs_from_prod_rules(prules[0], graph_name, G.number_of_nodes(),10)
  print '... Hstar graphs:', len(hStars)
# plot_g_hstars(G,hStars)
deg_vcnt_to_disk(G, hStars)
if 1:
metricx = ['degree']# ,'hops', 'clust', 'assort', 'kcore','eigen','gcd']
metrics.network_properties([G], metricx, hStars, name=graph_name, out_tsv=True)
if __name__ == '__main__':
parser = get_parser()
args = vars(parser.parse_args())
in_file = args['g_fname'][0]
datframes = tdf.Pandas_DataFrame_From_Edgelist([in_file])
df = datframes[0]
# g_name = os.path.basename(in_file).lstrip('out.')
g_name = os.path.basename(in_file).split('.')[1]
print '...', g_name
if args['chunglu']:
print 'Generate chunglu graphs given an edgelist'
sys.exit(0)
elif args['kron']:
    print 'Generate Kronecker product graphs given an edgelist'
sys.exit(0)
try:
get_hrg_production_rules(df,g_name)
except Exception, e:
print 'ERROR, UNEXPECTED SAVE PLOT EXCEPTION'
print str(e)
traceback.print_exc()
os._exit(1)
sys.exit(0)
| nddsg/TreeDecomps | xplodnTree/tdec/exact_phrg.py | Python | mit | 10,046 |
#!/usr/bin/env python
#
# ___INFO__MARK_BEGIN__
#######################################################################################
# Copyright 2016-2021 Univa Corporation (acquired and owned by Altair Engineering Inc.)
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
# ___INFO__MARK_END__
#
import string
import random
import os
import os.path
import json
from tempfile import NamedTemporaryFile
from nose import SkipTest
from nose.tools import make_decorator
CONFIG_FILE = '/tmp/uge.test.conf'
LOG_FILE = '/tmp/uge.test.log'
CONFIG_FILE_TEMPLATE = """
[LoggerLevels]
root=error
expressions: ^.*$=trace
[ConsoleLogging]
handler=stream_log_handler.StreamLogHandler(sys.stdout,)
level=debug
format=%(asctime)s %(levelname)s %(process)d %(filename)s:%(lineno)d %(message)s
datefmt=%Y-%m-%d %H:%M:%S
[FileLogging]
handler=timed_rotating_file_log_handler.TimedRotatingFileLogHandler('LOG_FILE')
level=trace
format=%(asctime)s %(levelname)s %(process)d %(message)s
datefmt=%Y-%m-%d %H:%M:%S
"""
def create_config_file(use_temporary_file=False):
config_file_name = CONFIG_FILE
if use_temporary_file:
config_file = NamedTemporaryFile(delete=False)
config_file_name = config_file.name
else:
if os.path.exists(config_file_name):
create_config_manager()
return
config_file = open(config_file_name, 'w')
config_string = CONFIG_FILE_TEMPLATE.replace(
'LOG_FILE', LOG_FILE)
config_file.write(config_string)
config_file.close()
create_config_manager()
return config_file_name
def remove_file(file_path):
if os.path.exists(file_path):
os.remove(file_path)
def remove_test_log_file():
remove_file(LOG_FILE)
def remove_test_config_file():
remove_file(CONFIG_FILE)
def remove_test_files():
remove_file(LOG_FILE)
remove_file(CONFIG_FILE)
def read_last_line(file_path):
f = open(file_path, 'r')
last_line = None
while True:
line = f.readline()
if not line:
break
last_line = line
f.close()
return last_line
def read_last_log_line():
return read_last_line(LOG_FILE)
def create_config_manager():
from uge.config.config_manager import ConfigManager
cm = ConfigManager.get_instance()
cm.set_config_file(CONFIG_FILE)
cm.set_log_file(LOG_FILE)
cm.set_file_log_level('trace')
def generate_random_string(size, chars=string.ascii_lowercase + string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def generate_random_string_list(n_strings, string_length, delimiter=',', string_prefix=''):
string_list = ''
string_delimiter = ''
for i in range(0, n_strings):
string_list = '%s%s%s%s' % (string_list, string_delimiter,
string_prefix,
generate_random_string(string_length))
string_delimiter = delimiter
return string_list
def load_values(value_file):
tpd = {}
if os.path.exists(value_file):
tpd = json.load(open(value_file))
return tpd
# Common decorators
def needs_setup(func):
def inner(*args, **kwargs):
create_config_file()
return func(*args, **kwargs)
return make_decorator(func)(inner)
def needs_cleanup(func):
def inner(*args, **kwargs):
remove_test_files()
return func(*args, **kwargs)
return make_decorator(func)(inner)
def needs_config(func):
def inner(*args, **kwargs):
try:
create_config_manager()
except Exception as ex:
print(ex)
raise SkipTest("Config manager instance could not be created.")
return func(*args, **kwargs)
return make_decorator(func)(inner)
def needs_uge(func):
def inner(*args, **kwargs):
from uge.exceptions.configuration_error import ConfigurationError
if 'SGE_ROOT' not in os.environ:
raise ConfigurationError('SGE_ROOT is not defined.')
return func(*args, **kwargs)
return make_decorator(func)(inner)
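# A minimal usage sketch for the decorators above (hypothetical test, not part
# of this module; kept as a comment so the test runner does not collect it):
#
#   @needs_setup
#   @needs_config
#   def test_last_log_line():
#       line = read_last_log_line()
#       assert line is None or line.endswith('\n')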
#############################################################################
# Testing
if __name__ == '__main__':
# print 'Last line: ', read_last_line('/tmp/uge.log')
create_config_file()
d = load_values('test_values.json')
print(d)
| gridengine/config-api | test/utils.py | Python | apache-2.0 | 4,927 |
#
# Copyright (C) 2005-2010 TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import gettext
_ = gettext.translation('yali', fallback=True).ugettext
from PyQt4.Qt import QWidget, SIGNAL, QPixmap, Qt, QListWidgetItem, QSize, QTimeLine, QTimer
import yali.util
import yali.context as ctx
from yali.gui import ScreenWidget
from yali.gui.Ui.collectionswidget import Ui_CollectionsWidget
from yali.gui.Ui.collectionitem import Ui_CollectionItem
CLOSED_SIZE = 36
ANIMATE_TIME = 400
EXPANDED_SIZE = 146
class Widget(Ui_CollectionsWidget, QWidget, ScreenWidget):
name = "collectionSelection"
def __init__(self):
QWidget.__init__(self)
self.setupUi(self)
self.collections = None
self.current_item = None
self.last_item = None
self.collectionList.itemClicked.connect(self.openItem)
self.collectionList.currentItemChanged.connect(self.itemChanged)
def fillCollections(self):
self.collectionList.clear()
selected = None
for index, collection in enumerate(self.collections):
self.addItem(collection)
if ctx.installData.autoCollection == collection:
selected = index
if not selected:
selected = 0
self.current_item = self.collectionList.item(selected)
self.last_item = self.current_item
self.collectionList.setCurrentRow(selected)
def shown(self):
self.collections = ctx.collections
self.fillCollections()
ctx.mainScreen.disableNext()
if self.current_item:
self.openItem(self.current_item)
else:
self.openItem(self.collectionList.item(0))
self.check()
def execute(self):
ctx.installData.autoCollection = self.collectionList.itemWidget(self.current_item).collection
return True
def check(self):
if self.current_item:
ctx.mainScreen.enableNext()
else:
ctx.mainScreen.disableNext()
def itemChanged(self, current, previous):
self.current_item = current
self.check()
def addItem(self, collection):
item = QListWidgetItem(self.collectionList)
item.setSizeHint(QSize(36, CLOSED_SIZE))
self.collectionList.addItem(item)
self.collectionList.setItemWidget(item, CollectionItem(self, collection, item))
def openItem(self, item):
if item == self.last_item:
return
if self.last_item:
self.closeItem(self.last_item)
self.animation = QTimeLine(ANIMATE_TIME, self)
        self.animation.setFrameRange(CLOSED_SIZE, EXPANDED_SIZE)
self.animation.frameChanged.connect(lambda x: item.setSizeHint(QSize(32, x)))
self.animation.start()
self.last_item = item
self.animation.finished.connect(lambda: self.collectionList.setCurrentItem(item))
def closeItem(self, item):
animation = QTimeLine(ANIMATE_TIME, self)
        animation.setFrameRange(EXPANDED_SIZE, CLOSED_SIZE)
animation.frameChanged.connect(lambda x: item.setSizeHint(QSize(32, x)))
animation.start()
class CollectionItem(Ui_CollectionItem, QWidget):
def __init__(self, parent, collection, item):
QWidget.__init__(self, parent)
self.setupUi(self)
self.parent = parent
self.item = item
self.collection = collection
self.header.setText(collection.title)
self.description.setText(collection.description)
icon = QPixmap(":/gui/pics/%s" % collection.icon)
if icon.isNull():
icon = QPixmap(":/gui/pics/systemsettings.png")
self.icon.setPixmap(icon)
| akuster/yali | yali/gui/ScrCollection.py | Python | gpl-2.0 | 3,898 |
"""This module contains the unit tests for
the formatting of block quotes.
"""
import unittest
import PyMarkdownGen.PyMarkdownGen as md
class BlockquoteTests(unittest.TestCase):
"""The test case (fixture) for testing block quotes."""
def test_block_quote(self):
"""Tests block quotes that contains a '>'
on every line.
"""
expected = \
"""> this is a
> block quote
> on multiple
> lines.
"""
self.assertEqual(expected,
md.gen_block_quote(
"this is a\nblock quote\n"
"on multiple\r\nlines."))
def test_block_quote_simple(self):
"""Tests block quotes that contain a '>'
only on the first line.
"""
expected = \
"""> this is a simple
block quote
on multiple
lines.
"""
self.assertEqual(expected,
md.gen_block_quote(
"this is a simple\nblock quote\n"
"on multiple\nlines.", True))
if __name__ == '__main__':
unittest.main() # pragma: no cover
| LukasWoodtli/PyMarkdownGen | PyMarkdownGen/test/block_quote_test.py | Python | epl-1.0 | 1,125 |
#!/usr/bin/env python
import sys
from uda_common import read_feature_groups
from sklearn.datasets import load_svmlight_file
import numpy as np
from os.path import dirname, join
def main(args):
if len(args) < 1:
sys.stderr.write("One required argument: <reduced training data> [freq=50]\n")
sys.exit(-1)
    freq_cutoff = 50 if len(args) <= 1 else int(args[1])
data_dir = dirname(args[0])
groups_file = join(data_dir, 'reduced-feature-groups.txt')
## Find the feature index that tells us what domain we're in:
group_map = read_feature_groups(groups_file)
domain_indices = group_map["Domain"]
## load the data:
all_X, all_y = load_svmlight_file(args[0])
num_instances, num_feats = all_X.shape
data_X = []
## To start with, the set of valid_inds is all indices
## This prevents the zero index as a pivot (probably the intercept)
valid_inds = set(range(1, num_feats))
## Create a subset for each domain:
for domain_ind in domain_indices:
inst_inds = np.where(all_X[:,domain_ind].toarray() != 0)[0]
## Find all the variables that are sometimes greater than 0
nz_inds = set(np.where(all_X[inst_inds,:].max(0).toarray() > 0)[1])
## Find variables that are never greater than 1
lo_inds = set(np.where(all_X[inst_inds,:].max(0).toarray() <= 1)[1])
## Take the intersection
range_inds = nz_inds.intersection(lo_inds)
## Find those with high frequency
freq_inds = set(np.where(all_X[inst_inds].sum(0) > freq_cutoff)[1])
## Intersect high freq with correct range, then with existing valid ind
valid_inds = valid_inds.intersection(range_inds.intersection(freq_inds))
ind_list = list(valid_inds)
ind_list.sort()
for i in ind_list:
print(i)
if __name__ == "__main__":
main(sys.argv[1:])
| tmills/uda | scripts/create_freq_pivots.py | Python | apache-2.0 | 1,863 |
# -*- coding:utf8 -*-
"""
rtrlib.rtr_socket
-----------------
"""
from __future__ import absolute_import, unicode_literals
from enum import Enum
from _rtrlib import lib
class RTRSocketList(object):
"""
List of RTRSockets. Can be accessed like any other list.
Read Only.
"""
def __init__(self, sockets, length):
self._sockets = sockets
self._length = length
def __getitem__(self, key):
if not isinstance(key, int):
raise TypeError("Index must be int")
if key >= self._length:
raise IndexError("Index out of range")
elif key < 0:
raise IndexError("Index may not be negative")
return RTRSocket(self._sockets[key])
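# Usage sketch (illustrative only; ``raw_sockets`` and ``length`` stand in for
# the cdata array and count normally supplied by the library, they are not real
# names from this module). The list supports indexing but not len() or iteration:
#
#   sockets = RTRSocketList(raw_sockets, length)
#   first_state = sockets[0].state    # an RTRSocketState member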
class RTRSocket(object):
"""
Wrapper around the rtr_socket struct
:param cdata socket: rtr_socket struct
"""
def __init__(self, socket):
self._socket = socket
@property
def expire_interval(self):
"""
Time period in seconds.
Received records are deleted if the client was unable to refresh \
data for this time period. If 0 is specified, the expire_interval \
is twice the refresh_interval.
"""
return self._socket.expire_interval
@property
def has_recieved_pdus(self):
"""
True, if this socket has already received PDUs
"""
return self._socket.has_recieved_pdus
@property
def last_update(self):
"""
Timestamp of the last validation record update.
        Is 0 if the pfx_table doesn't store any validation records from this \
rtr_socket.
"""
return self._socket.last_update
@property
def refresh_interval(self):
"""
Time period in seconds.
Tells the router how long to wait before next attempting \
to poll the cache, using a Serial Query or Reset Query PDU.
"""
return self._socket.refresh_interval
@property
def retry_interval(self):
"""
Time period in seconds between a failed query and the next attempt.
"""
return self._socket.retry_interval
@property
def state(self):
"""
Current state of the socket.
"""
return RTRSocketState(self._socket.state)
@property
def version(self):
"""
Protocol version used by this socket
"""
return self._socket.version
class RTRSocketState(Enum):
"""
States of the RTR socket
"""
CONNECTING = lib.RTR_CONNECTING
"""Socket is establishing the transport connection"""
ESTABLISHED = lib.RTR_ESTABLISHED
"""
Connection is established and socket is waiting for a Serial Notify or \
expiration of the refresh_interval timer.
"""
RESET = lib.RTR_RESET
"""Resetting RTR connection"""
SYNC = lib.RTR_SYNC
"""Receiving validation records from the RTR server"""
FAST_RECONNECT = lib.RTR_FAST_RECONNECT
"""Reconnect without any waiting period"""
ERROR_NO_DATA_AVAILABLE = lib.RTR_ERROR_NO_DATA_AVAIL
"""No validation records are available on the RTR server"""
# pylint: disable=invalid-name
ERROR_NO_INCREMENTAL_UPDATE_AVAILABLE = lib.RTR_ERROR_NO_INCR_UPDATE_AVAIL
"""Server was unable to answer the last serial or reset query"""
ERROR_FATAL = lib.RTR_ERROR_FATAL
"""Fatal protocol error occurred"""
ERROR_TRANSPORT = lib.RTR_ERROR_TRANSPORT
"""Error on the transport socket occurred"""
SHUTDOWN = lib.RTR_SHUTDOWN
"""RTR Socket is stopped"""
| rtrlib/python-binding | rtrlib/rtr_socket.py | Python | mit | 3,583 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import contextlib
import pytz
import datetime
import ipaddress
import itertools
import logging
import hmac
from collections import defaultdict
from hashlib import sha256
from itertools import chain, repeat
from lxml import etree
from lxml.builder import E
import passlib.context
from odoo import api, fields, models, tools, SUPERUSER_ID, _
from odoo.exceptions import AccessDenied, AccessError, UserError, ValidationError
from odoo.http import request
from odoo.osv import expression
from odoo.service.db import check_super
from odoo.tools import partition, collections
_logger = logging.getLogger(__name__)
# Only users who can modify the user (incl. the user herself) see the real contents of these fields
USER_PRIVATE_FIELDS = []
DEFAULT_CRYPT_CONTEXT = passlib.context.CryptContext(
# kdf which can be verified by the context. The default encryption kdf is
# the first of the list
['pbkdf2_sha512', 'plaintext'],
# deprecated algorithms are still verified as usual, but ``needs_update``
# will indicate that the stored hash should be replaced by a more recent
# algorithm. Passlib 1.6 supports an `auto` value which deprecates any
# algorithm but the default, but Ubuntu LTS only provides 1.5 so far.
deprecated=['plaintext'],
)
concat = chain.from_iterable
#
# Functions for manipulating boolean and selection pseudo-fields
#
def name_boolean_group(id):
return 'in_group_' + str(id)
def name_selection_groups(ids):
return 'sel_groups_' + '_'.join(str(it) for it in ids)
def is_boolean_group(name):
return name.startswith('in_group_')
def is_selection_groups(name):
return name.startswith('sel_groups_')
def is_reified_group(name):
return is_boolean_group(name) or is_selection_groups(name)
def get_boolean_group(name):
return int(name[9:])
def get_selection_groups(name):
return [int(v) for v in name[11:].split('_')]
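# For illustration (example added here, not part of the original module): the
# helpers above simply round-trip group ids through pseudo-field names, e.g.
#   name_boolean_group(10)                  -> 'in_group_10'
#   get_boolean_group('in_group_10')        -> 10
#   name_selection_groups([3, 5])           -> 'sel_groups_3_5'
#   get_selection_groups('sel_groups_3_5')  -> [3, 5]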
def parse_m2m(commands):
"return a list of ids corresponding to a many2many value"
ids = []
for command in commands:
if isinstance(command, (tuple, list)):
if command[0] in (1, 4):
ids.append(command[1])
elif command[0] == 5:
ids = []
elif command[0] == 6:
ids = list(command[2])
else:
ids.append(command)
return ids
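# For illustration (example added here, not part of the original module): the
# usual many2many command tuples reduce to plain ids, e.g.
#   parse_m2m([(6, 0, [1, 2]), (4, 3), 7])  -> [1, 2, 3, 7]
#   parse_m2m([(4, 1), (5,), (4, 2)])       -> [2]   # a (5,) command clears the list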
#----------------------------------------------------------
# Basic res.groups and res.users
#----------------------------------------------------------
class Groups(models.Model):
_name = "res.groups"
_description = "Access Groups"
_rec_name = 'full_name'
_order = 'name'
name = fields.Char(required=True, translate=True)
users = fields.Many2many('res.users', 'res_groups_users_rel', 'gid', 'uid')
model_access = fields.One2many('ir.model.access', 'group_id', string='Access Controls', copy=True)
rule_groups = fields.Many2many('ir.rule', 'rule_group_rel',
'group_id', 'rule_group_id', string='Rules', domain=[('global', '=', False)])
menu_access = fields.Many2many('ir.ui.menu', 'ir_ui_menu_group_rel', 'gid', 'menu_id', string='Access Menu')
view_access = fields.Many2many('ir.ui.view', 'ir_ui_view_group_rel', 'group_id', 'view_id', string='Views')
comment = fields.Text(translate=True)
category_id = fields.Many2one('ir.module.category', string='Application', index=True)
color = fields.Integer(string='Color Index')
full_name = fields.Char(compute='_compute_full_name', string='Group Name', search='_search_full_name')
share = fields.Boolean(string='Share Group', help="Group created to set access rights for sharing data with some users.")
_sql_constraints = [
('name_uniq', 'unique (category_id, name)', 'The name of the group must be unique within an application!')
]
@api.multi
@api.constrains('users')
def _check_one_user_type(self):
self.mapped('users')._check_one_user_type()
@api.depends('category_id.name', 'name')
def _compute_full_name(self):
# Important: value must be stored in environment of group, not group1!
for group, group1 in zip(self, self.sudo()):
if group1.category_id:
group.full_name = '%s / %s' % (group1.category_id.name, group1.name)
else:
group.full_name = group1.name
def _search_full_name(self, operator, operand):
lst = True
if isinstance(operand, bool):
domains = [[('name', operator, operand)], [('category_id.name', operator, operand)]]
            if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not operand):
return expression.AND(domains)
else:
return expression.OR(domains)
if isinstance(operand, str):
lst = False
operand = [operand]
where = []
for group in operand:
values = [v for v in group.split('/') if v]
group_name = values.pop().strip()
category_name = values and '/'.join(values).strip() or group_name
group_domain = [('name', operator, lst and [group_name] or group_name)]
category_domain = [('category_id.name', operator, lst and [category_name] or category_name)]
if operator in expression.NEGATIVE_TERM_OPERATORS and not values:
category_domain = expression.OR([category_domain, [('category_id', '=', False)]])
if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not values):
sub_where = expression.AND([group_domain, category_domain])
else:
sub_where = expression.OR([group_domain, category_domain])
if operator in expression.NEGATIVE_TERM_OPERATORS:
where = expression.AND([where, sub_where])
else:
where = expression.OR([where, sub_where])
return where
@api.model
def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
# add explicit ordering if search is sorted on full_name
if order and order.startswith('full_name'):
groups = super(Groups, self).search(args)
groups = groups.sorted('full_name', reverse=order.endswith('DESC'))
groups = groups[offset:offset+limit] if limit else groups[offset:]
return len(groups) if count else groups.ids
return super(Groups, self)._search(args, offset=offset, limit=limit, order=order, count=count, access_rights_uid=access_rights_uid)
@api.multi
def copy(self, default=None):
self.ensure_one()
chosen_name = default.get('name') if default else ''
default_name = chosen_name or _('%s (copy)') % self.name
default = dict(default or {}, name=default_name)
return super(Groups, self).copy(default)
@api.multi
def write(self, vals):
if 'name' in vals:
if vals['name'].startswith('-'):
raise UserError(_('The name of the group can not start with "-"'))
# invalidate caches before updating groups, since the recomputation of
# field 'share' depends on method has_group()
self.env['ir.model.access'].call_cache_clearing_methods()
self.env['res.users'].has_group.clear_cache(self.env['res.users'])
return super(Groups, self).write(vals)
class ResUsersLog(models.Model):
_name = 'res.users.log'
_order = 'id desc'
_description = 'Users Log'
    # Currently only uses the magical fields: create_uid, create_date,
# for recording logins. To be extended for other uses (chat presence, etc.)
class Users(models.Model):
""" User class. A res.users record models an OpenERP user and is different
from an employee.
res.users class now inherits from res.partner. The partner model is
used to store the data related to the partner: lang, name, address,
avatar, ... The user model is now dedicated to technical data.
"""
_name = "res.users"
_description = 'Users'
_inherits = {'res.partner': 'partner_id'}
_order = 'name, login'
__uid_cache = defaultdict(dict) # {dbname: {uid: password}}
# User can write on a few of his own fields (but not his groups for example)
SELF_WRITEABLE_FIELDS = ['signature', 'action_id', 'company_id', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz']
# User can read a few of his own fields
SELF_READABLE_FIELDS = ['signature', 'company_id', 'login', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz', 'tz_offset', 'groups_id', 'partner_id', '__last_update', 'action_id']
def _default_groups(self):
default_user = self.env.ref('base.default_user', raise_if_not_found=False)
return (default_user or self.env['res.users']).sudo().groups_id
def _companies_count(self):
return self.env['res.company'].sudo().search_count([])
partner_id = fields.Many2one('res.partner', required=True, ondelete='restrict', auto_join=True,
string='Related Partner', help='Partner-related data of the user')
login = fields.Char(required=True, help="Used to log into the system")
password = fields.Char(
compute='_compute_password', inverse='_set_password',
invisible=True, copy=False,
help="Keep empty if you don't want the user to be able to connect on the system.")
new_password = fields.Char(string='Set Password',
compute='_compute_password', inverse='_set_new_password',
help="Specify a value only when creating a user or if you're "\
"changing the user's password, otherwise leave empty. After "\
"a change of password, the user has to login again.")
signature = fields.Html()
active = fields.Boolean(default=True)
active_partner = fields.Boolean(related='partner_id.active', readonly=True, string="Partner is Active")
action_id = fields.Many2one('ir.actions.actions', string='Home Action',
help="If specified, this action will be opened at log on for this user, in addition to the standard menu.")
groups_id = fields.Many2many('res.groups', 'res_groups_users_rel', 'uid', 'gid', string='Groups', default=_default_groups)
log_ids = fields.One2many('res.users.log', 'create_uid', string='User log entries')
login_date = fields.Datetime(related='log_ids.create_date', string='Latest authentication', readonly=False)
share = fields.Boolean(compute='_compute_share', compute_sudo=True, string='Share User', store=True,
help="External user with limited access, created only for the purpose of sharing data.")
companies_count = fields.Integer(compute='_compute_companies_count', string="Number of Companies", default=_companies_count)
tz_offset = fields.Char(compute='_compute_tz_offset', string='Timezone offset', invisible=True)
@api.model
def _get_company(self):
return self.env.user.company_id
# Special behavior for this field: res.company.search() will only return the companies
# available to the current user (should be the user's companies?), when the user_preference
# context is set.
company_id = fields.Many2one('res.company', string='Company', required=True, default=_get_company,
help='The company this user is currently working for.', context={'user_preference': True})
company_ids = fields.Many2many('res.company', 'res_company_users_rel', 'user_id', 'cid',
string='Companies', default=_get_company)
# overridden inherited fields to bypass access rights, in case you have
# access to the user but not its corresponding partner
name = fields.Char(related='partner_id.name', inherited=True, readonly=False)
email = fields.Char(related='partner_id.email', inherited=True, readonly=False)
accesses_count = fields.Integer('# Access Rights', help='Number of access rights that apply to the current user',
compute='_compute_accesses_count')
rules_count = fields.Integer('# Record Rules', help='Number of record rules that apply to the current user',
compute='_compute_accesses_count')
groups_count = fields.Integer('# Groups', help='Number of groups that apply to the current user',
compute='_compute_accesses_count')
_sql_constraints = [
('login_key', 'UNIQUE (login)', 'You can not have two users with the same login !')
]
def init(self):
cr = self.env.cr
# allow setting plaintext passwords via SQL and have them
# automatically encrypted at startup: look for passwords which don't
# match the "extended" MCF and pass those through passlib.
# Alternative: iterate on *all* passwords and use CryptContext.identify
cr.execute("""
SELECT id, password FROM res_users
WHERE password IS NOT NULL
AND password !~ '^\$[^$]+\$[^$]+\$.'
""")
if self.env.cr.rowcount:
Users = self.sudo()
for uid, pw in cr.fetchall():
Users.browse(uid).password = pw
def _set_password(self):
ctx = self._crypt_context()
for user in self:
self._set_encrypted_password(user.id, ctx.encrypt(user.password))
def _set_encrypted_password(self, uid, pw):
assert self._crypt_context().identify(pw) != 'plaintext'
self.env.cr.execute(
'UPDATE res_users SET password=%s WHERE id=%s',
(pw, uid)
)
self.invalidate_cache(['password'], [uid])
def _check_credentials(self, password):
""" Validates the current user's password.
Override this method to plug additional authentication methods.
Overrides should:
* call `super` to delegate to parents for credentials-checking
* catch AccessDenied and perform their own checking
* (re)raise AccessDenied if the credentials are still invalid
according to their own validation method
When trying to check for credentials validity, call _check_credentials
instead.
"""
""" Override this method to plug additional authentication methods"""
assert password
self.env.cr.execute(
"SELECT COALESCE(password, '') FROM res_users WHERE id=%s",
[self.env.user.id]
)
[hashed] = self.env.cr.fetchone()
valid, replacement = self._crypt_context()\
.verify_and_update(password, hashed)
if replacement is not None:
self._set_encrypted_password(self.env.user.id, replacement)
if not valid:
raise AccessDenied()
def _compute_password(self):
for user in self:
user.password = ''
user.new_password = ''
def _set_new_password(self):
for user in self:
if not user.new_password:
# Do not update the password if no value is provided, ignore silently.
# For example web client submits False values for all empty fields.
continue
if user == self.env.user:
# To change their own password, users must use the client-specific change password wizard,
# so that the new password is immediately used for further RPC requests, otherwise the user
# will face unexpected 'Access Denied' exceptions.
raise UserError(_('Please use the change password wizard (in User Preferences or User menu) to change your own password.'))
else:
user.password = user.new_password
@api.depends('groups_id')
def _compute_share(self):
for user in self:
user.share = not user.has_group('base.group_user')
@api.multi
def _compute_companies_count(self):
companies_count = self._companies_count()
for user in self:
user.companies_count = companies_count
@api.depends('tz')
def _compute_tz_offset(self):
for user in self:
user.tz_offset = datetime.datetime.now(pytz.timezone(user.tz or 'GMT')).strftime('%z')
@api.depends('groups_id')
def _compute_accesses_count(self):
for user in self:
groups = user.groups_id
user.accesses_count = len(groups.mapped('model_access'))
user.rules_count = len(groups.mapped('rule_groups'))
user.groups_count = len(groups)
@api.onchange('login')
def on_change_login(self):
if self.login and tools.single_email_re.match(self.login):
self.email = self.login
@api.onchange('parent_id')
def onchange_parent_id(self):
return self.mapped('partner_id').onchange_parent_id()
def _read_from_database(self, field_names, inherited_field_names=[]):
super(Users, self)._read_from_database(field_names, inherited_field_names)
canwrite = self.check_access_rights('write', raise_exception=False)
if not canwrite and set(USER_PRIVATE_FIELDS).intersection(field_names):
for record in self:
for f in USER_PRIVATE_FIELDS:
try:
record._cache[f]
record._cache[f] = '********'
except Exception:
# skip SpecialValue (e.g. for missing record or access right)
pass
@api.multi
@api.constrains('company_id', 'company_ids')
def _check_company(self):
if any(user.company_ids and user.company_id not in user.company_ids for user in self):
raise ValidationError(_('The chosen company is not in the allowed companies for this user'))
@api.multi
@api.constrains('action_id')
def _check_action_id(self):
action_open_website = self.env.ref('base.action_open_website', raise_if_not_found=False)
if action_open_website and any(user.action_id.id == action_open_website.id for user in self):
raise ValidationError(_('The "App Switcher" action cannot be selected as home action.'))
@api.multi
@api.constrains('groups_id')
def _check_one_user_type(self):
for user in self:
if len(user.groups_id.filtered(lambda x: x.category_id.xml_id == 'base.module_category_user_type')) > 1:
raise ValidationError(_('The user cannot have more than one user types.'))
@api.multi
def toggle_active(self):
for user in self:
if not user.active and not user.partner_id.active:
user.partner_id.toggle_active()
super(Users, self).toggle_active()
@api.multi
def read(self, fields=None, load='_classic_read'):
if fields and self == self.env.user:
for key in fields:
if not (key in self.SELF_READABLE_FIELDS or key.startswith('context_')):
break
else:
# safe fields only, so we read as super-user to bypass access rights
self = self.sudo()
return super(Users, self).read(fields=fields, load=load)
@api.model
def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
groupby_fields = set([groupby] if isinstance(groupby, str) else groupby)
if groupby_fields.intersection(USER_PRIVATE_FIELDS):
raise AccessError(_("Invalid 'group by' parameter"))
return super(Users, self).read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)
@api.model
def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
if self._uid != SUPERUSER_ID and args:
domain_fields = {term[0] for term in args if isinstance(term, (tuple, list))}
if domain_fields.intersection(USER_PRIVATE_FIELDS):
raise AccessError(_('Invalid search criterion'))
return super(Users, self)._search(args, offset=offset, limit=limit, order=order, count=count,
access_rights_uid=access_rights_uid)
@api.model_create_multi
def create(self, vals_list):
users = super(Users, self.with_context(default_customer=False)).create(vals_list)
for user in users:
user.partner_id.active = user.active
if user.partner_id.company_id:
user.partner_id.write({'company_id': user.company_id.id})
return users
@api.multi
def write(self, values):
if values.get('active') and SUPERUSER_ID in self._ids:
raise UserError(_("You cannot activate the superuser."))
if values.get('active') == False and self._uid in self._ids:
raise UserError(_("You cannot deactivate the user you're currently logged in as."))
if values.get('active'):
for user in self:
if not user.active and not user.partner_id.active:
user.partner_id.toggle_active()
if self == self.env.user:
for key in list(values):
if not (key in self.SELF_WRITEABLE_FIELDS or key.startswith('context_')):
break
else:
if 'company_id' in values:
if values['company_id'] not in self.env.user.company_ids.ids:
del values['company_id']
# safe fields only, so we write as super-user to bypass access rights
self = self.sudo().with_context(binary_field_real_user=self.env.user)
res = super(Users, self).write(values)
if 'company_id' in values:
for user in self:
# if partner is global we keep it that way
if user.partner_id.company_id and user.partner_id.company_id.id != values['company_id']:
user.partner_id.write({'company_id': user.company_id.id})
# clear default ir values when company changes
self.env['ir.default'].clear_caches()
# clear caches linked to the users
if 'groups_id' in values:
self.env['ir.model.access'].call_cache_clearing_methods()
self.env['ir.rule'].clear_caches()
self.has_group.clear_cache(self)
if any(key.startswith('context_') or key in ('lang', 'tz') for key in values):
self.context_get.clear_cache(self)
if any(key in values for key in ['active'] + USER_PRIVATE_FIELDS):
db = self._cr.dbname
for id in self.ids:
self.__uid_cache[db].pop(id, None)
if any(key in values for key in self._get_session_token_fields()):
self._invalidate_session_cache()
return res
@api.multi
def unlink(self):
if SUPERUSER_ID in self.ids:
raise UserError(_('You can not remove the admin user as it is used internally for resources created by Odoo (updates, module installation, ...)'))
db = self._cr.dbname
for id in self.ids:
self.__uid_cache[db].pop(id, None)
self._invalidate_session_cache()
return super(Users, self).unlink()
@api.model
def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
args = args or []
if operator == 'ilike' and not (name or '').strip():
domain = []
else:
domain = [('login', '=', name)]
user_ids = self._search(expression.AND([domain, args]), limit=limit, access_rights_uid=name_get_uid)
if not user_ids:
user_ids = self._search(expression.AND([[('name', operator, name)], args]), limit=limit, access_rights_uid=name_get_uid)
return self.browse(user_ids).name_get()
@api.multi
def copy(self, default=None):
self.ensure_one()
default = dict(default or {})
if ('name' not in default) and ('partner_id' not in default):
default['name'] = _("%s (copy)") % self.name
if 'login' not in default:
default['login'] = _("%s (copy)") % self.login
return super(Users, self).copy(default)
@api.model
@tools.ormcache('self._uid')
def context_get(self):
user = self.env.user
# determine field names to read
name_to_key = {
name: name[8:] if name.startswith('context_') else name
for name in self._fields
if name.startswith('context_') or name in ('lang', 'tz')
}
# use read() to not read other fields: this must work while modifying
# the schema of models res.users or res.partner
values = user.read(list(name_to_key), load=False)[0]
return {
key: values[name]
for name, key in name_to_key.items()
}
@api.model
def action_get(self):
return self.sudo().env.ref('base.action_res_users_my').read()[0]
def check_super(self, passwd):
return check_super(passwd)
@api.model
def _update_last_login(self):
# only create new records to avoid any side-effect on concurrent transactions
# extra records will be deleted by the periodical garbage collection
self.env['res.users.log'].create({}) # populated by defaults
@api.model
def _get_login_domain(self, login):
return [('login', '=', login)]
@classmethod
def _login(cls, db, login, password):
if not password:
raise AccessDenied()
ip = request.httprequest.environ['REMOTE_ADDR'] if request else 'n/a'
try:
with cls.pool.cursor() as cr:
self = api.Environment(cr, SUPERUSER_ID, {})[cls._name]
with self._assert_can_auth():
user = self.search(self._get_login_domain(login))
if not user:
raise AccessDenied()
user = user.sudo(user.id)
user._check_credentials(password)
user._update_last_login()
except AccessDenied:
_logger.info("Login failed for db:%s login:%s from %s", db, login, ip)
raise
_logger.info("Login successful for db:%s login:%s from %s", db, login, ip)
return user.id
@classmethod
def authenticate(cls, db, login, password, user_agent_env):
"""Verifies and returns the user ID corresponding to the given
``login`` and ``password`` combination, or False if there was
no matching user.
:param str db: the database on which user is trying to authenticate
:param str login: username
:param str password: user password
:param dict user_agent_env: environment dictionary describing any
relevant environment attributes
"""
uid = cls._login(db, login, password)
if user_agent_env and user_agent_env.get('base_location'):
with cls.pool.cursor() as cr:
env = api.Environment(cr, uid, {})
if env.user.has_group('base.group_system'):
# Successfully logged in as system user!
# Attempt to guess the web base url...
try:
base = user_agent_env['base_location']
ICP = env['ir.config_parameter']
if not ICP.get_param('web.base.url.freeze'):
ICP.set_param('web.base.url', base)
except Exception:
_logger.exception("Failed to update web.base.url configuration parameter")
return uid
@classmethod
def check(cls, db, uid, passwd):
"""Verifies that the given (uid, password) is authorized for the database ``db`` and
raise an exception if it is not."""
if not passwd:
# empty passwords disallowed for obvious security reasons
raise AccessDenied()
db = cls.pool.db_name
if cls.__uid_cache[db].get(uid) == passwd:
return
cr = cls.pool.cursor()
try:
self = api.Environment(cr, uid, {})[cls._name]
with self._assert_can_auth():
self._check_credentials(passwd)
cls.__uid_cache[db][uid] = passwd
finally:
cr.close()
def _get_session_token_fields(self):
return {'id', 'login', 'password', 'active'}
@tools.ormcache('sid')
def _compute_session_token(self, sid):
""" Compute a session token given a session id and a user id """
# retrieve the fields used to generate the session token
session_fields = ', '.join(sorted(self._get_session_token_fields()))
self.env.cr.execute("""SELECT %s, (SELECT value FROM ir_config_parameter WHERE key='database.secret')
FROM res_users
WHERE id=%%s""" % (session_fields), (self.id,))
if self.env.cr.rowcount != 1:
self._invalidate_session_cache()
return False
data_fields = self.env.cr.fetchone()
# generate hmac key
key = (u'%s' % (data_fields,)).encode('utf-8')
# hmac the session id
data = sid.encode('utf-8')
h = hmac.new(key, data, sha256)
# keep in the cache the token
return h.hexdigest()
@api.multi
def _invalidate_session_cache(self):
""" Clear the sessions cache """
self._compute_session_token.clear_cache(self)
@api.model
def change_password(self, old_passwd, new_passwd):
"""Change current user password. Old password must be provided explicitly
to prevent hijacking an existing user session, or for cases where the cleartext
password is not used to authenticate requests.
:return: True
:raise: odoo.exceptions.AccessDenied when old password is wrong
:raise: odoo.exceptions.UserError when new password is not set or empty
"""
self.check(self._cr.dbname, self._uid, old_passwd)
if new_passwd:
# use self.env.user here, because it has uid=SUPERUSER_ID
return self.env.user.write({'password': new_passwd})
raise UserError(_("Setting empty passwords is not allowed for security reasons!"))
@api.multi
def preference_save(self):
return {
'type': 'ir.actions.client',
'tag': 'reload_context',
}
@api.multi
def preference_change_password(self):
return {
'type': 'ir.actions.client',
'tag': 'change_password',
'target': 'new',
}
@api.model
def has_group(self, group_ext_id):
# use singleton's id if called on a non-empty recordset, otherwise
# context uid
uid = self.id or self._uid
return self.sudo(user=uid)._has_group(group_ext_id)
@api.model
@tools.ormcache('self._uid', 'group_ext_id')
def _has_group(self, group_ext_id):
"""Checks whether user belongs to given group.
:param str group_ext_id: external ID (XML ID) of the group.
Must be provided in fully-qualified form (``module.ext_id``), as there
           is no implicit module to use.
:return: True if the current user is a member of the group with the
given external ID (XML ID), else False.
"""
assert group_ext_id and '.' in group_ext_id, "External ID '%s' must be fully qualified" % group_ext_id
module, ext_id = group_ext_id.split('.')
self._cr.execute("""SELECT 1 FROM res_groups_users_rel WHERE uid=%s AND gid IN
(SELECT res_id FROM ir_model_data WHERE module=%s AND name=%s)""",
(self._uid, module, ext_id))
return bool(self._cr.fetchone())
# for a few places explicitly clearing the has_group cache
has_group.clear_cache = _has_group.clear_cache
def action_show_groups(self):
self.ensure_one()
return {
'name': _('Groups'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'res.groups',
'type': 'ir.actions.act_window',
'context': {'create': False, 'delete': False},
'domain': [('id','in', self.groups_id.ids)],
'target': 'current',
}
def action_show_accesses(self):
self.ensure_one()
return {
'name': _('Access Rights'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'ir.model.access',
'type': 'ir.actions.act_window',
'context': {'create': False, 'delete': False},
'domain': [('id', 'in', self.mapped('groups_id.model_access').ids)],
'target': 'current',
}
def action_show_rules(self):
self.ensure_one()
return {
'name': _('Record Rules'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'ir.rule',
'type': 'ir.actions.act_window',
'context': {'create': False, 'delete': False},
'domain': [('id', 'in', self.mapped('groups_id.rule_groups').ids)],
'target': 'current',
}
@api.multi
def _is_public(self):
self.ensure_one()
return self.has_group('base.group_public')
@api.multi
def _is_system(self):
self.ensure_one()
return self.has_group('base.group_system')
@api.multi
def _is_admin(self):
self.ensure_one()
return self._is_superuser() or self.has_group('base.group_erp_manager')
@api.multi
def _is_superuser(self):
self.ensure_one()
return self.id == SUPERUSER_ID
@api.model
def get_company_currency_id(self):
return self.env.user.company_id.currency_id.id
def _crypt_context(self):
""" Passlib CryptContext instance used to encrypt and verify
passwords. Can be overridden if technical, legal or political matters
require different kdfs than the provided default.
Requires a CryptContext as deprecation and upgrade notices are used
internally
"""
return DEFAULT_CRYPT_CONTEXT
@contextlib.contextmanager
def _assert_can_auth(self):
""" Checks that the current environment even allows the current auth
request to happen.
The baseline implementation is a simple linear login cooldown: after
a number of failures trying to log-in, the user (by login) is put on
cooldown. During the cooldown period, login *attempts* are ignored
and logged.
.. warning::
The login counter is not shared between workers and not
specifically thread-safe, the feature exists mostly for
rate-limiting on large number of login attempts (brute-forcing
passwords) so that should not be much of an issue.
For a more complex strategy (e.g. database or distribute storage)
override this method. To simply change the cooldown criteria
(configuration, ...) override _on_login_cooldown instead.
.. note::
This is a *context manager* so it can be called around the login
procedure without having to call it itself.
"""
# needs request for remote address
if not request:
yield
return
reg = self.env.registry
failures_map = getattr(reg, '_login_failures', None)
if failures_map is None:
failures_map = reg._login_failures = collections.defaultdict(lambda : (0, datetime.datetime.min))
source = request.httprequest.remote_addr
(failures, previous) = failures_map[source]
if self._on_login_cooldown(failures, previous):
_logger.warn(
"Login attempt ignored for %s on %s: "
"%d failures since last success, last failure at %s. "
"You can configure the number of login failures before a "
"user is put on cooldown as well as the duration in the "
"System Parameters. Disable this feature by setting "
"\"base.login_cooldown_after\" to 0.",
source, self.env.cr.dbname, failures, previous)
if ipaddress.ip_address(source).is_private:
_logger.warn(
"The rate-limited IP address %s is classified as private "
"and *might* be a proxy. If your Odoo is behind a proxy, "
"it may be mis-configured. Check that you are running "
"Odoo in Proxy Mode and that the proxy is properly configured, see "
"https://www.odoo.com/documentation/12.0/setup/deploy.html#https for details.",
source
)
raise AccessDenied(_("Too many login failures, please wait a bit before trying again."))
try:
yield
except AccessDenied:
(failures, __) = reg._login_failures[source]
reg._login_failures[source] = (failures + 1, datetime.datetime.now())
raise
else:
reg._login_failures.pop(source, None)
def _on_login_cooldown(self, failures, previous):
""" Decides whether the user trying to log in is currently
"on cooldown" and not even allowed to attempt logging in.
The default cooldown function simply puts the user on cooldown for
<login_cooldown_duration> seconds after each failure following the
<login_cooldown_after>th (0 to disable).
Can be overridden to implement more complex backoff strategies, or
e.g. wind down or reset the cooldown period as the previous failure
recedes into the far past.
:param int failures: number of recorded failures (since last success)
:param previous: timestamp of previous failure
:type previous: datetime.datetime
:returns: whether the user is currently in cooldown phase (true if cooldown, false if no cooldown and login can continue)
:rtype: bool
"""
cfg = self.env['ir.config_parameter'].sudo()
min_failures = int(cfg.get_param('base.login_cooldown_after', 5))
        if min_failures == 0:
            # 0 disables the cooldown feature entirely, as documented above
            return False
delay = int(cfg.get_param('base.login_cooldown_duration', 60))
return failures >= min_failures and (datetime.datetime.now() - previous) < datetime.timedelta(seconds=delay)
def _register_hook(self):
if hasattr(self, 'check_credentials'):
_logger.warn("The check_credentials method of res.users has been renamed _check_credentials. One of your installed modules defines one, but it will not be called anymore.")
#
# Implied groups
#
# Extension of res.groups and res.users with a relation for "implied" or
# "inherited" groups. Once a user belongs to a group, it automatically belongs
# to the implied groups (transitively).
#
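#
# Example (group names are hypothetical): if "Sales / Manager" lists
# "Sales / User" in implied_ids, adding a user to "Sales / Manager" also adds
# them to "Sales / User", and transitively to every group that "Sales / User"
# itself implies.
#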
class GroupsImplied(models.Model):
_inherit = 'res.groups'
implied_ids = fields.Many2many('res.groups', 'res_groups_implied_rel', 'gid', 'hid',
string='Inherits', help='Users of this group automatically inherit those groups')
trans_implied_ids = fields.Many2many('res.groups', string='Transitively inherits',
compute='_compute_trans_implied')
@api.depends('implied_ids.trans_implied_ids')
def _compute_trans_implied(self):
# Compute the transitive closure recursively. Note that the performance
# is good, because the record cache behaves as a memo (the field is
# never computed twice on a given group.)
for g in self:
g.trans_implied_ids = g.implied_ids | g.mapped('implied_ids.trans_implied_ids')
@api.model_create_multi
def create(self, vals_list):
user_ids_list = [vals.pop('users', None) for vals in vals_list]
groups = super(GroupsImplied, self).create(vals_list)
for group, user_ids in zip(groups, user_ids_list):
if user_ids:
# delegate addition of users to add implied groups
group.write({'users': user_ids})
return groups
@api.multi
def write(self, values):
res = super(GroupsImplied, self).write(values)
if values.get('users') or values.get('implied_ids'):
# add all implied groups (to all users of each group)
for group in self:
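                # The recursive CTE below computes the transitive closure of the
                # implied-group relation; the INSERT then adds one (implied group,
                # user) row per user of this group, while the EXCEPT clause skips
                # memberships that already exist.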
self._cr.execute("""
WITH RECURSIVE group_imply(gid, hid) AS (
SELECT gid, hid
FROM res_groups_implied_rel
UNION
SELECT i.gid, r.hid
FROM res_groups_implied_rel r
JOIN group_imply i ON (i.hid = r.gid)
)
INSERT INTO res_groups_users_rel (gid, uid)
SELECT i.hid, r.uid
FROM group_imply i, res_groups_users_rel r
WHERE r.gid = i.gid
AND i.gid = %(gid)s
EXCEPT
SELECT r.gid, r.uid
FROM res_groups_users_rel r
JOIN group_imply i ON (r.gid = i.hid)
WHERE i.gid = %(gid)s
""", dict(gid=group.id))
return res
class UsersImplied(models.Model):
_inherit = 'res.users'
@api.model_create_multi
def create(self, vals_list):
for values in vals_list:
if 'groups_id' in values:
# complete 'groups_id' with implied groups
user = self.new(values)
group_public = self.env.ref('base.group_public', raise_if_not_found=False)
group_portal = self.env.ref('base.group_portal', raise_if_not_found=False)
if group_public and group_public in user.groups_id:
gs = self.env.ref('base.group_public') | self.env.ref('base.group_public').trans_implied_ids
elif group_portal and group_portal in user.groups_id:
gs = self.env.ref('base.group_portal') | self.env.ref('base.group_portal').trans_implied_ids
else:
gs = user.groups_id | user.groups_id.mapped('trans_implied_ids')
values['groups_id'] = type(self).groups_id.convert_to_write(gs, user.groups_id)
return super(UsersImplied, self).create(vals_list)
@api.multi
def write(self, values):
res = super(UsersImplied, self).write(values)
if values.get('groups_id'):
# add implied groups for all users
for user in self.with_context({}):
if not user.has_group('base.group_user'):
vals = {'groups_id': [(5, 0, 0)] + values['groups_id']}
super(UsersImplied, user).write(vals)
gs = set(concat(g.trans_implied_ids for g in user.groups_id))
vals = {'groups_id': [(4, g.id) for g in gs]}
super(UsersImplied, user).write(vals)
return res
#
# Virtual checkbox and selection for res.user form view
#
# Extension of res.groups and res.users for the special groups view in the users
# form. This extension presents groups with selection and boolean widgets:
# - Groups are shown by application, with boolean and/or selection fields.
#   Selection fields typically define a role "Name" for the given application.
# - Uncategorized groups are presented as boolean fields and grouped in a
# section "Others".
#
# The user form view is modified by an inherited view (base.user_groups_view);
# the inherited view replaces the field 'groups_id' by a set of reified group
# fields (boolean or selection fields). The arch of that view is regenerated
# each time groups are changed.
#
# Naming conventions for reified groups fields:
# - boolean field 'in_group_ID' is True iff
# ID is in 'groups_id'
# - selection field 'sel_groups_ID1_..._IDk' is ID iff
# ID is in 'groups_id' and ID is maximal in the set {ID1, ..., IDk}
#
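#
# Example (ids are hypothetical): a group with id 10 is reified as the boolean
# field 'in_group_10'; an application whose groups have ids 1, 9 and 10 is
# reified as the selection field 'sel_groups_1_9_10', whose value is the
# maximal id of that set present in 'groups_id'.
#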
class GroupsView(models.Model):
_inherit = 'res.groups'
@api.model
def create(self, values):
user = super(GroupsView, self).create(values)
self._update_user_groups_view()
# actions.get_bindings() depends on action records
self.env['ir.actions.actions'].clear_caches()
return user
@api.multi
def write(self, values):
res = super(GroupsView, self).write(values)
self._update_user_groups_view()
# actions.get_bindings() depends on action records
self.env['ir.actions.actions'].clear_caches()
return res
@api.multi
def unlink(self):
res = super(GroupsView, self).unlink()
self._update_user_groups_view()
# actions.get_bindings() depends on action records
self.env['ir.actions.actions'].clear_caches()
return res
@api.model
def _update_user_groups_view(self):
""" Modify the view with xmlid ``base.user_groups_view``, which inherits
the user form view, and introduces the reified group fields.
"""
if self._context.get('install_mode'):
# use installation/admin language for translatable names in the view
user_context = self.env['res.users'].context_get()
self = self.with_context(**user_context)
# We have to try-catch this, because at first init the view does not
# exist but we are already creating some basic groups.
view = self.env.ref('base.user_groups_view', raise_if_not_found=False)
if view and view.exists() and view._name == 'ir.ui.view':
group_no_one = view.env.ref('base.group_no_one')
group_employee = view.env.ref('base.group_user')
xml1, xml2, xml3 = [], [], []
xml_by_category = {}
xml1.append(E.separator(string=_('User Type'), colspan="2", groups='base.group_no_one'))
user_type_field_name = ''
for app, kind, gs, category_name in self.get_groups_by_application():
attrs = {}
# hide groups in categories 'Hidden' and 'Extra' (except for group_no_one)
if app.xml_id in ('base.module_category_hidden', 'base.module_category_extra', 'base.module_category_usability'):
attrs['groups'] = 'base.group_no_one'
                # User type (employee, portal or public) is a separate group. This is the only 'selection'
                # group of res.groups whose groups do not imply one another.
if app.xml_id == 'base.module_category_user_type':
# application name with a selection field
field_name = name_selection_groups(gs.ids)
user_type_field_name = field_name
attrs['widget'] = 'radio'
attrs['groups'] = 'base.group_no_one'
xml1.append(E.field(name=field_name, **attrs))
xml1.append(E.newline())
elif kind == 'selection':
# application name with a selection field
field_name = name_selection_groups(gs.ids)
if category_name not in xml_by_category:
xml_by_category[category_name] = []
xml_by_category[category_name].append(E.newline())
xml_by_category[category_name].append(E.field(name=field_name, **attrs))
xml_by_category[category_name].append(E.newline())
else:
# application separator with boolean fields
app_name = app.name or _('Other')
xml3.append(E.separator(string=app_name, colspan="4", **attrs))
for g in gs:
field_name = name_boolean_group(g.id)
if g == group_no_one:
# make the group_no_one invisible in the form view
xml3.append(E.field(name=field_name, invisible="1", **attrs))
else:
xml3.append(E.field(name=field_name, **attrs))
xml3.append({'class': "o_label_nowrap"})
if user_type_field_name:
user_type_attrs = {'invisible': [(user_type_field_name, '!=', group_employee.id)]}
else:
user_type_attrs = {}
for xml_cat in sorted(xml_by_category.keys(), key=lambda it: it[0]):
xml_cat_name = xml_cat[1]
master_category_name = (_(xml_cat_name))
xml2.append(E.group(*(xml_by_category[xml_cat]), col="2", string=master_category_name))
xml = E.field(
E.group(*(xml1), col="2"),
E.group(*(xml2), col="2", attrs=str(user_type_attrs)),
E.group(*(xml3), col="4", attrs=str(user_type_attrs)), name="groups_id", position="replace")
xml.addprevious(etree.Comment("GENERATED AUTOMATICALLY BY GROUPS"))
xml_content = etree.tostring(xml, pretty_print=True, encoding="unicode")
new_context = dict(view._context)
new_context.pop('install_mode_data', None) # don't set arch_fs for this computed view
new_context['lang'] = None
view.with_context(new_context).write({'arch': xml_content})
def get_application_groups(self, domain):
""" Return the non-share groups that satisfy ``domain``. """
return self.search(domain + [('share', '=', False)])
@api.model
def get_groups_by_application(self):
""" Return all groups classified by application (module category), as a list::
[(app, kind, groups), ...],
where ``app`` and ``groups`` are recordsets, and ``kind`` is either
``'boolean'`` or ``'selection'``. Applications are given in sequence
order. If ``kind`` is ``'selection'``, ``groups`` are given in
reverse implication order.
"""
def linearize(app, gs, category_name):
# 'User Type' is an exception
if app.xml_id == 'base.module_category_user_type':
return (app, 'selection', gs.sorted('id'), category_name)
# determine sequence order: a group appears after its implied groups
order = {g: len(g.trans_implied_ids & gs) for g in gs}
# check whether order is total, i.e., sequence orders are distinct
if len(set(order.values())) == len(gs):
return (app, 'selection', gs.sorted(key=order.get), category_name)
else:
return (app, 'boolean', gs, (100, 'Other'))
# classify all groups by application
by_app, others = defaultdict(self.browse), self.browse()
for g in self.get_application_groups([]):
if g.category_id:
by_app[g.category_id] += g
else:
others += g
# build the result
res = []
for app, gs in sorted(by_app.items(), key=lambda it: it[0].sequence or 0):
if app.parent_id:
res.append(linearize(app, gs, (app.parent_id.sequence, app.parent_id.name)))
else:
res.append(linearize(app, gs, (100, 'Other')))
if others:
res.append((self.env['ir.module.category'], 'boolean', others, (100,'Other')))
return res
class UsersView(models.Model):
_inherit = 'res.users'
@api.model
def create(self, values):
values = self._remove_reified_groups(values)
user = super(UsersView, self).create(values)
group_multi_company = self.env.ref('base.group_multi_company', False)
if group_multi_company and 'company_ids' in values:
if len(user.company_ids) <= 1 and user.id in group_multi_company.users.ids:
group_multi_company.write({'users': [(3, user.id)]})
elif len(user.company_ids) > 1 and user.id not in group_multi_company.users.ids:
group_multi_company.write({'users': [(4, user.id)]})
return user
@api.multi
def write(self, values):
values = self._remove_reified_groups(values)
res = super(UsersView, self).write(values)
group_multi_company = self.env.ref('base.group_multi_company', False)
if group_multi_company and 'company_ids' in values:
for user in self:
if len(user.company_ids) <= 1 and user.id in group_multi_company.users.ids:
group_multi_company.write({'users': [(3, user.id)]})
elif len(user.company_ids) > 1 and user.id not in group_multi_company.users.ids:
group_multi_company.write({'users': [(4, user.id)]})
return res
def _remove_reified_groups(self, values):
""" return `values` without reified group fields """
add, rem = [], []
values1 = {}
for key, val in values.items():
if is_boolean_group(key):
(add if val else rem).append(get_boolean_group(key))
elif is_selection_groups(key):
rem += get_selection_groups(key)
if val:
add.append(val)
else:
values1[key] = val
if 'groups_id' not in values and (add or rem):
# remove group ids in `rem` and add group ids in `add`
values1['groups_id'] = list(itertools.chain(
zip(repeat(3), rem),
zip(repeat(4), add)
))
return values1
@api.model
def default_get(self, fields):
group_fields, fields = partition(is_reified_group, fields)
fields1 = (fields + ['groups_id']) if group_fields else fields
values = super(UsersView, self).default_get(fields1)
self._add_reified_groups(group_fields, values)
return values
@api.multi
def read(self, fields=None, load='_classic_read'):
# determine whether reified groups fields are required, and which ones
fields1 = fields or list(self.fields_get())
group_fields, other_fields = partition(is_reified_group, fields1)
# read regular fields (other_fields); add 'groups_id' if necessary
drop_groups_id = False
if group_fields and fields:
if 'groups_id' not in other_fields:
other_fields.append('groups_id')
drop_groups_id = True
else:
other_fields = fields
res = super(UsersView, self).read(other_fields, load=load)
# post-process result to add reified group fields
if group_fields:
for values in res:
self._add_reified_groups(group_fields, values)
if drop_groups_id:
values.pop('groups_id', None)
return res
def _add_reified_groups(self, fields, values):
""" add the given reified group fields into `values` """
gids = set(parse_m2m(values.get('groups_id') or []))
for f in fields:
if is_boolean_group(f):
values[f] = get_boolean_group(f) in gids
elif is_selection_groups(f):
selected = [gid for gid in get_selection_groups(f) if gid in gids]
# if 'Internal User' is in the group, this is the "User Type" group
# and we need to show 'Internal User' selected, not Public/Portal.
if self.env.ref('base.group_user').id in selected:
values[f] = self.env.ref('base.group_user').id
else:
values[f] = selected and selected[-1] or False
@api.model
def fields_get(self, allfields=None, attributes=None):
res = super(UsersView, self).fields_get(allfields, attributes=attributes)
# add reified groups fields
for app, kind, gs, category_name in self.env['res.groups'].sudo().get_groups_by_application():
if kind == 'selection':
# 'User Type' should not be 'False'. A user is either 'employee', 'portal' or 'public' (required).
selection_vals = [(False, '')]
if app.xml_id == 'base.module_category_user_type':
selection_vals = []
field_name = name_selection_groups(gs.ids)
if allfields and field_name not in allfields:
continue
# selection group field
tips = ['%s: %s' % (g.name, g.comment) for g in gs if g.comment]
res[field_name] = {
'type': 'selection',
'string': app.name or _('Other'),
'selection': selection_vals + [(g.id, g.name) for g in gs],
'help': '\n'.join(tips),
'exportable': False,
'selectable': False,
}
else:
# boolean group fields
for g in gs:
field_name = name_boolean_group(g.id)
if allfields and field_name not in allfields:
continue
res[field_name] = {
'type': 'boolean',
'string': g.name,
'help': g.comment,
'exportable': False,
'selectable': False,
}
return res
#----------------------------------------------------------
# change password wizard
#----------------------------------------------------------
class ChangePasswordWizard(models.TransientModel):
""" A wizard to manage the change of users' passwords. """
_name = "change.password.wizard"
_description = "Change Password Wizard"
def _default_user_ids(self):
user_ids = self._context.get('active_model') == 'res.users' and self._context.get('active_ids') or []
return [
(0, 0, {'user_id': user.id, 'user_login': user.login})
for user in self.env['res.users'].browse(user_ids)
]
user_ids = fields.One2many('change.password.user', 'wizard_id', string='Users', default=_default_user_ids)
@api.multi
def change_password_button(self):
self.ensure_one()
self.user_ids.change_password_button()
if self.env.user in self.mapped('user_ids.user_id'):
return {'type': 'ir.actions.client', 'tag': 'reload'}
return {'type': 'ir.actions.act_window_close'}
class ChangePasswordUser(models.TransientModel):
""" A model to configure users in the change password wizard. """
_name = 'change.password.user'
_description = 'User, Change Password Wizard'
wizard_id = fields.Many2one('change.password.wizard', string='Wizard', required=True, ondelete='cascade')
user_id = fields.Many2one('res.users', string='User', required=True, ondelete='cascade')
user_login = fields.Char(string='User Login', readonly=True)
new_passwd = fields.Char(string='New Password', default='')
@api.multi
def change_password_button(self):
for line in self:
if not line.new_passwd:
raise UserError(_("Before clicking on 'Change Password', you have to write a new password."))
line.user_id.write({'password': line.new_passwd})
# don't keep temporary passwords in the database longer than necessary
self.write({'new_passwd': False})
| t3dev/odoo | odoo/addons/base/models/res_users.py | Python | gpl-3.0 | 59,781 |
import os
import shutil
import tempfile
from contextlib import contextmanager
from importlib import import_module
from django.apps import apps
from django.db import connections
from django.db.migrations.recorder import MigrationRecorder
from django.test import TransactionTestCase
from django.test.utils import extend_sys_path
from django.utils.module_loading import module_dir
class MigrationTestBase(TransactionTestCase):
"""
Contains an extended set of asserts for testing migrations and schema operations.
"""
available_apps = ["migrations"]
def tearDown(self):
# Reset applied-migrations state.
for db in connections:
recorder = MigrationRecorder(connections[db])
recorder.migration_qs.filter(app='migrations').delete()
def get_table_description(self, table, using='default'):
with connections[using].cursor() as cursor:
return connections[using].introspection.get_table_description(cursor, table)
def assertTableExists(self, table, using='default'):
with connections[using].cursor() as cursor:
self.assertIn(table, connections[using].introspection.table_names(cursor))
def assertTableNotExists(self, table, using='default'):
with connections[using].cursor() as cursor:
self.assertNotIn(table, connections[using].introspection.table_names(cursor))
def assertColumnExists(self, table, column, using='default'):
self.assertIn(column, [c.name for c in self.get_table_description(table, using=using)])
def assertColumnNotExists(self, table, column, using='default'):
self.assertNotIn(column, [c.name for c in self.get_table_description(table, using=using)])
def _get_column_allows_null(self, table, column, using):
return [c.null_ok for c in self.get_table_description(table, using=using) if c.name == column][0]
def assertColumnNull(self, table, column, using='default'):
self.assertEqual(self._get_column_allows_null(table, column, using), True)
def assertColumnNotNull(self, table, column, using='default'):
self.assertEqual(self._get_column_allows_null(table, column, using), False)
def assertIndexExists(self, table, columns, value=True, using='default'):
with connections[using].cursor() as cursor:
self.assertEqual(
value,
any(
c["index"]
for c in connections[using].introspection.get_constraints(cursor, table).values()
if c['columns'] == list(columns)
),
)
def assertIndexNotExists(self, table, columns):
return self.assertIndexExists(table, columns, False)
def assertFKExists(self, table, columns, to, value=True, using='default'):
with connections[using].cursor() as cursor:
self.assertEqual(
value,
any(
c["foreign_key"] == to
for c in connections[using].introspection.get_constraints(cursor, table).values()
if c['columns'] == list(columns)
),
)
def assertFKNotExists(self, table, columns, to):
return self.assertFKExists(table, columns, to, False)
@contextmanager
def temporary_migration_module(self, app_label='migrations', module=None):
"""
Allows testing management commands in a temporary migrations module.
Wrap all invocations to makemigrations and squashmigrations with this
context manager in order to avoid creating migration files in your
source tree inadvertently.
Takes the application label that will be passed to makemigrations or
squashmigrations and the Python path to a migrations module.
The migrations module is used as a template for creating the temporary
migrations module. If it isn't provided, the application's migrations
module is used, if it exists.
Returns the filesystem path to the temporary migrations module.
"""
with tempfile.TemporaryDirectory() as temp_dir:
target_dir = tempfile.mkdtemp(dir=temp_dir)
with open(os.path.join(target_dir, '__init__.py'), 'w'):
pass
target_migrations_dir = os.path.join(target_dir, 'migrations')
if module is None:
module = apps.get_app_config(app_label).name + '.migrations'
try:
source_migrations_dir = module_dir(import_module(module))
except (ImportError, ValueError):
pass
else:
shutil.copytree(source_migrations_dir, target_migrations_dir)
with extend_sys_path(temp_dir):
new_module = os.path.basename(target_dir) + '.migrations'
with self.settings(MIGRATION_MODULES={app_label: new_module}):
yield target_migrations_dir
| auready/django | tests/migrations/test_base.py | Python | bsd-3-clause | 4,977 |
from io import open
from itertools import islice
import numpy as np
import os
import random
import time
import yaml
def strip_quotations_newline(text):
''' This function is needed when reading tweets from Labeled_Tweets.json
'''
text = text.rstrip()
if text[0] == '"':
text = text[1:]
if text[-1] == '"':
text = text[:-1]
return text
def split_text(text):
text = strip_quotations_newline(text)
splitted_text = text.split(" ")
cleaned_text = [x for x in splitted_text if len(x)>1]
text_lowercase = [x.lower() for x in cleaned_text]
return text_lowercase
def split_dataset(X, Y):
split_ratio = 0.7
    split_index = int(split_ratio * len(X))
return X[:split_index], Y[:split_index], X[split_index:], Y[split_index:]
def load_data(file=None):
''' Loads the training data from a yaml file and returns - input training
data, output training data, input test data and output test data
'''
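    # Usage sketch:
    #   train_X, train_Y, test_X, test_Y = load_data('string_classifier_test.yaml')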
start_time = time.time()
if file is None:
file = 'string_classifier_test.yaml'
yaml_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
file)
X, Y = [], []
with open(yaml_path, 'r') as yaml_file:
yaml_dict = yaml.load(yaml_file)
interactions = yaml_dict['states']['Home']['interaction']
# The first element contains no training data,
# so only consider [1:].
for answer_group in interactions['answer_groups'][1:]:
label = answer_group['outcome']['feedback'][0]
for rule in answer_group['rule_specs']:
if 'inputs' in rule and 'training_data' in rule['inputs']:
for answer in rule['inputs']['training_data']:
X.append(answer)
Y.append(label)
combined = list(zip(X, Y))
random.shuffle(combined)
X[:], Y[:] = zip(*combined)
end_time = time.time()
print "Data load time={0} sec, Please add this up time to the exectuion time".format(end_time - start_time)
return split_dataset(X, Y)
def load_huge_data(samples=10000):
    ''' Loads `samples` number of examples (out of around 0.6 million tweets in total).
Useful for evaluating how much time each classifier takes.
'''
start_time = time.time()
json_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Labeled_Tweets.json')
X, Y = [], []
with open(json_path, 'r') as json_file:
lines_gen = islice(json_file, samples)
for line in lines_gen:
            # Bit of a hack: json.loads wasn't working for some reason, so the
            # tweet text is pulled out by splitting the raw line instead.
tweet = line.split(',')[4].split(':')[1][2:-1]
X.append(tweet)
Y.append(random.random() > 0.5)
combined = list(zip(X[1:100000], Y[1:100000]))
a = time.time()
random.shuffle(combined)
random.shuffle(combined)
X[:], Y[:] = zip(*combined)
end_time = time.time()
print "Data load time={0} sec, Please add this up time to the exectuion time".format(end_time - start_time)
return split_dataset(X, Y)
if __name__ == "__main__":
print load_data()
| anmolshkl/oppia-ml | load_data.py | Python | apache-2.0 | 3,107 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-01 03:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0007_auto_20170630_1248'),
]
operations = [
migrations.RemoveField(
model_name='account',
name='language',
),
migrations.RemoveField(
model_name='account',
name='timezone',
),
]
| michealcarrerweb/LHVent_app | account/migrations/0008_auto_20170701_0333.py | Python | mit | 500 |
# -*- coding: utf-8 -*-
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from setuptools import setup, find_packages
from dist_utils import fetch_requirements
from dist_utils import apply_vagrant_workaround
from st2actions import __version__
ST2_COMPONENT = 'st2actions'
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
REQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt')
install_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)
apply_vagrant_workaround()
setup(
name=ST2_COMPONENT,
version=__version__,
description='{} component'.format(ST2_COMPONENT),
author='StackStorm',
author_email='[email protected]',
install_requires=install_reqs,
dependency_links=dep_links,
test_suite=ST2_COMPONENT,
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=['setuptools', 'tests']),
scripts=[
'bin/st2actionrunner',
'bin/st2notifier',
'bin/st2resultstracker',
'bin/runners.sh'
]
)
| alfasin/st2 | st2actions/setup.py | Python | apache-2.0 | 1,752 |
from __future__ import absolute_import, print_function, division
from netlib.http import decoded
from .connections import ClientConnection, ServerConnection
from .flow import Flow, Error
from .http import (
HTTPFlow, HTTPRequest, HTTPResponse, Headers,
make_error_response, make_connect_request, make_connect_response, expect_continue_response
)
from .tcp import TCPFlow
FLOW_TYPES = dict(
http=HTTPFlow,
tcp=TCPFlow,
)
__all__ = [
"HTTPFlow", "HTTPRequest", "HTTPResponse", "Headers", "decoded",
"make_error_response", "make_connect_request",
"make_connect_response", "expect_continue_response",
"ClientConnection", "ServerConnection",
"Flow", "Error",
"TCPFlow",
"FLOW_TYPES",
]
| x2Ident/x2Ident_test | mitmproxy/mitmproxy/models/__init__.py | Python | gpl-3.0 | 753 |
# immediately below is stupid hackery for setuptools to work with Cython
import distutils.extension
import distutils.command.build_ext
from distutils.extension import Extension as _Extension
from setuptools import setup
distutils.extension.Extension = _Extension
distutils.command.build_ext.Extension = _Extension
Extension = _Extension
from Cython.Distutils import build_ext
# end stupid hackery
# these lines will cause html annotation files to be generated
from Cython.Compiler.Main import default_options as pyrex_default_options
pyrex_default_options['annotate'] = True
import os
import numpy
import sys
build_ffmpeg = True
if "--disable-ffmpeg" in sys.argv:
sys.argv.remove("--disable-ffmpeg")
build_ffmpeg = False
if build_ffmpeg:
print "building ffmpeg/_extract.o"
os.system("g++ -Wno-deprecated-declarations -D__STDC_CONSTANT_MACROS -c -O3 "
"-fPIC vision/ffmpeg/_extract.c -o vision/ffmpeg/_extract.o")
print "building liblinear"
os.system("make -C vision/liblinear")
root = os.getcwd() + "/vision/"
ext_modules = [
Extension("vision.annotations", ["vision/annotations.pyx",
"vision/annotations.pxd"]),
Extension("vision.features", ["vision/features.pyx"]),
Extension("vision.model", ["vision/model.pyx"]),
Extension("vision.convolution", ["vision/convolution.pyx"]),
Extension("vision.track.standard", ["vision/track/standard.pyx"]),
Extension("vision.alearn.linear", ["vision/alearn/linear.pyx"]),
Extension("vision.alearn.marginals", ["vision/alearn/marginals.pyx"]),
Extension("vision.track.dp", ["vision/track/dp.pyx",
"vision/track/dp.pxd"]),
Extension("vision.track.pairwise", ["vision/track/pairwise.pyx"]),
Extension("vision.svm", ["vision/svm.pyx"],
extra_objects = [root + "liblinear/linear.o",
root + "liblinear/tron.o",
root + "liblinear/blas/blas.a"],
language = "c++")]
if build_ffmpeg:
ext_modules.append(
Extension("vision.ffmpeg.extract",
sources = ["vision/ffmpeg/extract.pyx"],
include_dirs = [root + "ffmpeg/"],
library_dirs = [root + "ffmpeg/"],
libraries = ["avformat", "avcodec", "avutil", "swscale"],
extra_objects = [root + "ffmpeg/_extract.o"],
language = "c++")
)
for e in ext_modules:
e.pyrex_directives = {
"boundscheck": False,
"cdivision": True,
"infer_types": True,
"embedsignature": True}
# e.include_dirs.append(".")
e.extra_compile_args = ["-w"]
e.include_dirs.append(numpy.get_include())
setup(
name = "pyvision",
author = "Carl Vondrick",
author_email = "[email protected]",
description = "A concise computer vision toolkit",
license = "MIT",
version = "0.2.5",
classifiers = ["Development Status :: 1 - Planning",
"Intended Audience :: Developers"],
packages = ["vision",
"vision.track",
"vision.ffmpeg",
"vision.alearn",
"vision.reporting"],
cmdclass = {"build_ext": build_ext},
ext_modules = ext_modules,
#ext_package = "vision"
)
| danielhauagge/pyvision | setup.py | Python | mit | 3,264 |
# Image windowing class
import numpy as np
import sidpy
from scipy import fftpack
from scipy.signal import hanning, blackman
from skimage.transform import rescale
class ImageWindowing:
"""
This class will generate windows from sidpy dataset objects. At present only 2D windowing is allowed.
"""
def __init__(self, parms_dict, verbose = False):
'''Sliding Window Class.
Parameters
----------
- parms_dict : dictionary
Dictionary with parameters of the windowing process, see below.
Keys:
- 'window_size_x' (integer) (required): size of the window across the x-axis
- 'window_size_y' (integer) (required): size of the window across the y-axis
- 'window_step_x' (integer) (required): step size of the window across the x-axis. Sometimes referred to as 'strides'
- 'window_step_y' (integer) (required): step size of the window across the y-axis. Sometimes referred to as 'strides'
- 'mode' (string) (Optional, default is 'image'): One of 'image' or 'fft' which defines the processing to be performed for each window.
The choice of 'fft' will perform 2D fast Fourier transforms on each image whereas 'image' will not perform any operation on the window
- 'fft_mode' (string) (Optional, default is 'abs'): If mode is 'fft', choose whether to look at amplitude or phase. Options are 'abs', 'phase' and 'complex'.
- 'interpol_factor' (float) (Optional, default is 1.0): Interpolation factor for windows to increase or decrease size of the windows.
- 'zoom_factor' (integer or list of ints) (Optional, default is 1): Zoom the window by this factor, typically done for 'fft' mode to observe higher frequencies clearly
If passing a list of ints, this will determine the degree of cropping per axis
- 'filter' (string) (Optional, default is None): Filtering to use for the image window. Options are 'blackman', 'hanning'.
The filter is applied to each window before 'mode'.
- verbose : (Optional) Boolean
Verbose flag. Default is False.
Returns
--------
Instance of ImageWindowing object setup with parameters defined by the parms_dict above.
'''
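        # Usage sketch (parameter values are illustrative, and `sidpy_image` is an
        # assumed pre-existing sidpy.Dataset holding a 2D image):
        #
        #   parms = {'window_size_x': 64, 'window_size_y': 64,
        #            'window_step_x': 16, 'window_step_y': 16,
        #            'mode': 'fft', 'fft_mode': 'abs', 'filter': 'hanning'}
        #   iw = ImageWindowing(parms)
        #   windows = iw.MakeWindows(sidpy_image)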
self.window_step_x = parms_dict['window_step_x']
self.window_step_y = parms_dict['window_step_y']
self.window_size_x = parms_dict['window_size_x']
self.window_size_y = parms_dict['window_size_y']
self.fft_mode = 'abs'
self.verbose = verbose
if 'mode' in parms_dict.keys():
if parms_dict['mode'] not in ['image', 'fft']:
raise ValueError("Parameters dictionary field 'mode' must be one of 'image' or 'fft'."
"Try again with one of these two options.")
else:
self.mode = parms_dict['mode']
else:
self.mode = 'image'
parms_dict['mode'] = 'image'
        if 'interpol_factor' in parms_dict.keys():
            self.interpol_factor = parms_dict['interpol_factor']
        else:
            self.interpol_factor = 1
            parms_dict['interpol_factor'] = 1
        if 'zoom_factor' in parms_dict.keys():
            self.zoom_factor = parms_dict['zoom_factor']
        else:
            self.zoom_factor = 1
            parms_dict['zoom_factor'] = 1
# Based on the zoom and interpolation factors we need to figure out the final size of the window
self.window_size_final_x, self.window_size_final_y = self._get_window_size()
#Setup the filter for the window
self.filter = 'None'
self.filter_mat = np.ones((self.window_size_final_x, self.window_size_final_y))
if self.mode=='fft':
#load FFT options
if 'filter' in parms_dict.keys():
if parms_dict['filter'] not in ['blackman', 'hanning']:
raise ValueError("Parameter 'filter' must be one of 'hanning', 'blackman'")
else:
self.filter = parms_dict['filter']
if self.filter=='hanning':
filter_x = hanning(self.window_size_final_x)
filter_y = hanning(self.window_size_final_y)
self.filter_mat = np.sqrt(np.outer(filter_x,filter_y))
elif self.filter=='blackman':
filter_x = blackman(self.window_size_final_x)
filter_y = blackman(self.window_size_final_y)
self.filter_mat = np.sqrt(np.outer(filter_x,filter_y))
if 'fft_mode' in parms_dict.keys():
if parms_dict['fft_mode'] not in ['abs', 'phase', 'complex']:
raise ValueError("Parameter 'fft_mode' must be \
one of 'abs', 'phase' or 'complex' ")
else:
self.fft_mode = parms_dict['fft_mode']
else:
self.fft_mode = 'abs' #default to absolute value in case fft mode is not provided
parms_dict['fft_mode'] = 'abs'
if self.verbose:
print('ImageWindowing Object created with parameters {}'.format(parms_dict))
self.window_parms = parms_dict
return
def _get_window_size(self):
'''
Computes window size based on zoom and interpolation factors
'''
image_test = np.random.uniform(size=(self.window_size_x, self.window_size_y))
print('image test is shape {}'.format(image_test.shape))
image_zoomed = self.zoom(image_test, self.zoom_factor)
print('image zoomed is shape {}'.format(image_zoomed.shape))
#interpolate it
zoomed_interpolated = rescale(image_zoomed, self.interpol_factor)
print('image zoomed interpol is shape {}'.format(zoomed_interpolated.shape))
return zoomed_interpolated.shape[0],zoomed_interpolated.shape[1]
def MakeWindows(self, dataset, dim_slice=None):
'''
Image should be a sidpy dataset object
We will take the image to be the first two spatial dimensions,
unless dimensions are specified
Inputs:
- dataset (sidpy.Dataset object of the image to be windowed)
- dim_slice (List) (Optional). list of integers of the slices over which the
image windowing should take place. This should be of length number of dimensions of
the dataset minus two.
Returns:
- windowed_dataset (sidpy.Dataset) object with windows created as per
the parameters passed to the ImageWindowing class.
'''
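        # Example (a hedged reading of the slicing code below): for a dataset with
        # one non-spatial dimension, e.g. shape (x, y, time), MakeWindows(dataset,
        # dim_slice=0) windows the image taken at index 0 of that extra dimension;
        # for a plain 2D image dim_slice can be omitted.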
# This is the windowing function. Will generate the windows (but not the FFT)
num_dimensions = dataset.ndim
if dim_slice is None:
if num_dimensions > 2:
raise ValueError('You have specified windowing on a sidpy dataset '
'with more than 2 dimensions without specifying slices')
else:
image_source = dataset[:]
image_dims = [0,1]
elif dim_slice is not None:
"""Get all spatial dimensions"""
image_dims = []
for dim, axis in dataset._axes.items():
if axis.dimension_type == sidpy.DimensionType.SPATIAL:
image_dims.append(dim)
all_dims = np.arange(0, num_dimensions)
slice_list = []
for k in range(num_dimensions):
if k in image_dims:
slice_list.append(slice(None, dataset.shape[k], 1))
else:
slice_list.append(dim_slice)
image_source = dataset[tuple(slice_list)]
self.image_shape = image_source.shape
if self.verbose:
print('Full image shape is {}'.format(self.image_shape))
window_step = [self.window_step_x, self.window_step_y]
window_size = [self.window_size_x, self.window_size_y]
window_size_final = [self.window_size_final_x, self.window_size_final_y]
dim_vec = []
for i in range(2):
dim_vec.append(np.arange(0, self.image_shape[i] - window_size[i], window_step[i]))
if self.verbose:
print("dim vec is {}".format(dim_vec))
_, pos_vec = self.build_ind_val_matrices(dim_vec)
if self.verbose:
print("Pos vec is {}".format(pos_vec))
pca_mat = np.zeros(shape=(pos_vec.shape[0], np.prod(window_size_final)), dtype=np.complex64)
pos_vec = np.int32(pos_vec)
for ind, pos in enumerate(pos_vec):
start_stop = [slice(x, x + y, 1) for x, y in zip(pos, window_size)]
full_slice = image_source[tuple(start_stop)]
full_slice = self._return_win_image_processed(full_slice)
pca_mat[ind] = full_slice.flatten()
self.pos_vec = pos_vec
# Get the positions and make them dimensions
new_x_vals = np.linspace(dataset._axes[image_dims[0]].values.min(),
dataset._axes[image_dims[0]].values.max(), len(np.unique(pos_vec[:, 0])))
new_y_vals = np.linspace(dataset._axes[image_dims[1]].values.min(),
dataset._axes[image_dims[1]].values.max(), len(np.unique(pos_vec[:, 1])))
if self.verbose:
print("position values x {} and y {}".format(new_x_vals, new_y_vals))
windows_reshaped = pca_mat.reshape(len(new_x_vals), len(new_y_vals),
self.window_size_final_x, self.window_size_final_y)
if self.verbose:
print('Reshaped windows size is {}'.format(windows_reshaped.shape))
# Make a sidpy dataset
#if the data is complex, then convert it to absolute
#this needs to be changed..depending on user preferences.
if np.iscomplexobj(windows_reshaped):
if self.fft_mode == 'abs':
windows_reshaped = np.array(np.abs(windows_reshaped), dtype = np.float64)
elif self.fft_mode == 'phase':
windows_reshaped = np.array(np.angle(windows_reshaped), dtype=np.float64)
data_set = sidpy.Dataset.from_array(windows_reshaped,
name='Image_Windowed')
# Set the data type
data_set.data_type = 'Image_4d'
# Add quantity and units
data_set.units = dataset.units
data_set.quantity = dataset.quantity
# Add dimension info
window_size_fraction_x = window_size[0]/self.image_shape[0]
window_size_fraction_y = window_size[1] / self.image_shape[1]
window_extent_x = (dataset._axes[image_dims[0]].values.max() -
dataset._axes[image_dims[0]].values.min())*window_size_fraction_x
window_extent_y = (dataset._axes[image_dims[1]].values.max() -
dataset._axes[image_dims[1]].values.min()) * window_size_fraction_y
if self.mode =='fft':
#to check if this is correct
z_dimx = np.linspace(0, 1.0/(window_extent_x / self.zoom_factor), data_set.shape[2])
z_dimy = np.linspace(0, 1.0/(window_extent_y / self.zoom_factor), data_set.shape[3])
else:
z_dimx = np.linspace(0, window_extent_x/self.zoom_factor, data_set.shape[2])
z_dimy = np.linspace(0, window_extent_y/self.zoom_factor, data_set.shape[3])
data_set.set_dimension(0, sidpy.Dimension(new_x_vals,
name=dataset._axes[image_dims[0]].name,
units=dataset._axes[image_dims[0]].units,
quantity=dataset._axes[image_dims[0]].quantity,
dimension_type='spatial'))
data_set.set_dimension(1, sidpy.Dimension(new_y_vals,
name=dataset._axes[image_dims[1]].name,
units=dataset._axes[image_dims[1]].units,
quantity=dataset._axes[image_dims[1]].quantity,
dimension_type='spatial'))
data_set.set_dimension(2, sidpy.Dimension(z_dimx,
name='WindowX',
units='m', quantity='kx',
dimension_type='spectral'))
data_set.set_dimension(3, sidpy.Dimension(z_dimy,
name='WindowY',
units='m', quantity='ky',
dimension_type='spectral'))
# append metadata
data_set.metadata = self._merge_dictionaries(dataset.metadata, self.window_parms)
return data_set
#TODO: After next release of sidpy, remove this method and use sidpy.base.num_utils copy
def build_ind_val_matrices(self, unit_values):
"""
Builds indices and values matrices using given unit values for each dimension.
This function is originally from pyUSID.io
Unit values must be arranged from fastest varying to slowest varying
Parameters
----------
unit_values : list / tuple
Sequence of values vectors for each dimension
Returns
-------
ind_mat : 2D numpy array
Indices matrix
val_mat : 2D numpy array
Values matrix
"""
if not isinstance(unit_values, (list, tuple)):
raise TypeError('unit_values should be a list or tuple')
if not np.all([np.array(x).ndim == 1 for x in unit_values]):
raise ValueError('unit_values should only contain 1D array')
lengths = [len(x) for x in unit_values]
tile_size = [np.prod(lengths[x:]) for x in range(1, len(lengths))] + [1]
rep_size = [1] + [np.prod(lengths[:x]) for x in range(1, len(lengths))]
val_mat = np.zeros(shape=(len(lengths), np.prod(lengths)))
ind_mat = np.zeros(shape=val_mat.shape, dtype=np.uint32)
for ind, ts, rs, vec in zip(range(len(lengths)), tile_size, rep_size, unit_values):
val_mat[ind] = np.tile(np.repeat(vec, rs), ts)
ind_mat[ind] = np.tile(np.repeat(np.arange(len(vec)), rs), ts)
val_mat = val_mat.T
ind_mat = ind_mat.T
return ind_mat, val_mat
def _return_win_image_processed(self, img_window):
#Real image slice, returns it back with image processed
if self.filter != 'None':
img_window *= self.filter_mat # Apply filter
if self.mode == 'fft': # Apply FFT if needed
img_window = np.fft.fftshift(np.fft.fft2(img_window))
            if self.fft_mode == 'abs':
                img_window = np.abs(img_window)
elif self.fft_mode == 'phase':
img_window = np.angle(img_window)
elif self.fft_mode == 'complex':
img_window = np.array(img_window, dtype = np.complex64)
#Zoom and interpolate if needed
if self.zoom_factor == 1 and self.interpol_factor == 1:
return img_window
else:
img_window = self.zoom(img_window, self.zoom_factor) # Zoom it
img_window = self.rescale_win(img_window, self.interpol_factor) # Rescale
return img_window
def _merge_dictionaries(self, dict1, dict2):
#given two dictionaries, merge them into one
merged_dict = {**dict1, **dict2}
return merged_dict
def zoom(self, img_window, zoom_factor):
#Zooms by the zoom factor
if zoom_factor==1:
return img_window
else:
if type(zoom_factor) is int:
zoom_factor = [zoom_factor, zoom_factor]
#Find the midpoint
img_x_mid = img_window.shape[0]//2
img_y_mid = img_window.shape[1]//2
zoom_x_size = (img_window.shape[0] / zoom_factor[0])/2
zoom_y_size = (img_window.shape[1] / zoom_factor[1])/2
img_window = img_window[int(img_x_mid - zoom_x_size) : int(img_x_mid + zoom_x_size),
int(img_y_mid - zoom_y_size ): int(img_y_mid + zoom_y_size)]
return img_window
def rescale_win(self, img_window, interpol_factor):
if self.fft_mode !='complex':
img_window = np.array(img_window, dtype = np.float32)
complex_rescaled_image = rescale(img_window, interpol_factor)
else:
real_img = np.real(img_window)
imag_img = np.imag(img_window)
real_img_scaled = rescale(real_img, interpol_factor)
imag_img_scaled = rescale(imag_img, interpol_factor)
complex_rescaled_image = real_img_scaled + 1j*imag_img_scaled
return complex_rescaled_image
| pycroscopy/pycroscopy | pycroscopy/image/image_window.py | Python | mit | 17,132 |
from collections import defaultdict
import bisect
from utils import memo
class Solution(object):
def findRotateSteps(self, ring, key):
"""
:type ring: str
:type key: str
:rtype: int
"""
m, n = len(ring), len(key)
mid = (m >> 1) + 1
dct = defaultdict(list)
for i, c in enumerate(ring):
dct[c].append(i)
only_once = {k for k, v in dct.items() if len(v) == 1}
@memo
def dfs(i, pre, key):
if i == len(key):
return 0
arr = dct[key[i]]
if len(arr) == 1:
temp = abs(pre - arr[0])
return min(temp, m - temp)
j = bisect.bisect(arr, pre)
stp, nxt = [], []
if j == len(arr) or j == 0:
if j == 0:
if arr[0] - pre < mid:
stp.append(arr[0] - pre)
nxt.append(arr[0])
if pre + m - arr[-1] < mid:
stp.append(pre + m - arr[-1])
nxt.append(arr[-1])
else:
if pre - arr[-1] < mid:
stp.append(pre - arr[-1])
nxt.append(arr[-1])
if arr[0] + m - pre < mid:
stp.append(arr[0] + m - pre)
nxt.append(arr[0])
else:
if pre - arr[j - 1] < mid:
stp.append(pre - arr[j - 1])
nxt.append(arr[j - 1])
if arr[j] - pre < mid:
stp.append(arr[j] - pre)
nxt.append(arr[j])
return min(s + dfs(i + 1, t, key) for s, t in zip(stp, nxt))
total = n
pre = 0
part = []
for i in range(n):
if i == 0 or key[i] != key[i - 1]:
part.append(key[i])
if key[i] in only_once:
total += dfs(0, pre, ''.join(part))
pre = dct[key[i]][0]
part = []
if part:
total += dfs(0, pre, ''.join(part))
return total
# assert Solution().findRotateSteps("godding", "gd") == 4
# assert Solution().findRotateSteps("godding", "gdi") == 7
# assert Solution().findRotateSteps("edcba", "abcde") == 10
# assert Solution().findRotateSteps("ababcab", "acbaacba") == 17
print(Solution().findRotateSteps("fhkfmhkmdk", "fkdkkkdkmfmkkmdkfdmmmfmhffdkhfhhfhfmfhmfmhhmmkkmkhhkhkhkmfhmmmhhkmdkfkkkfdhkdfhdfkkdfkkkfkfhmhkkfmkh"))
| wufangjie/leetcode | 514. Freedom Trail.py | Python | gpl-3.0 | 2,585 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph quantization script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.platform import flags as flags_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.tools.quantization import quantize_graph
flags = flags_lib
FLAGS = flags.FLAGS
def run_graph_def(graph_def, input_map, outputs):
graph = ops_lib.Graph()
with graph.as_default():
importer.import_graph_def(graph_def, input_map={}, name="")
with session.Session(graph=graph) as sess:
results = sess.run(outputs, feed_dict=input_map)
return results
def test_mat_mul(m, n, k, a, b):
"""Tests a MatMul replacement."""
a_constant_name = "a_constant"
b_constant_name = "b_constant"
mat_mul_name = "mat_mul"
float_graph_def = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=a, dtype=dtypes.float32, shape=[m, k])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=b, dtype=dtypes.float32, shape=[k, n])
float_graph_def.node.extend([b_constant])
mat_mul_node = quantize_graph.create_node("MatMul", mat_mul_name,
[a_constant_name, b_constant_name])
quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_a", False)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_b", False)
float_graph_def.node.extend([mat_mul_node])
test_graph(float_graph_def, {}, [mat_mul_name])
def test_conv(depth, image_width, image_height, image_batch_count, filter_size,
filter_count, stride, padding, input_values, filter_values):
"""Tests a Conv replacement."""
input_constant_name = "input_constant"
filter_constant_name = "filter_constant"
conv_name = "conv"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=input_values,
dtype=dtypes.float32,
shape=[image_batch_count, image_height, image_width, depth])
float_graph_def.node.extend([input_constant])
filter_constant = quantize_graph.create_constant_node(
filter_constant_name,
value=filter_values,
dtype=dtypes.float32,
shape=[filter_size, filter_size, depth, filter_count])
float_graph_def.node.extend([filter_constant])
conv_node = quantize_graph.create_node(
"Conv2D", conv_name, [input_constant_name, filter_constant_name])
quantize_graph.set_attr_dtype(conv_node, "T", dtypes.float32)
quantize_graph.set_attr_int_list(conv_node, "strides", [1, stride, stride, 1])
quantize_graph.set_attr_string(conv_node, "padding", padding)
float_graph_def.node.extend([conv_node])
test_graph(float_graph_def, {}, [conv_name])
def are_tensors_near(a, b, tolerance):
"""Tests whether two tensors are nearly identical.
This is a specialized comparison function designed to help debug problems with
quantization. It prints out information about the differences between tensors
on failure, paying special attention to possible biases by looking at the mean
and absolute average errors.
Args:
a: First comparison tensor.
b: Second comparison tensor.
tolerance: Float value indicating how large an error between values is ok.
Returns:
Boolean indicating whether the two inputs were close enough.
"""
flat_a = a.flatten()
flat_b = b.flatten()
if len(flat_a) != len(flat_b):
tf_logging.info("Tensors are different sizes: " + str(len(flat_a)) + " vs "
+ str(len(flat_b)))
return False
value_count = len(flat_a)
how_many_different = 0
total_difference = 0
total_abs_difference = 0
for index in range(value_count):
a_value = flat_a[index]
b_value = flat_b[index]
difference = a_value - b_value
total_difference += difference
total_abs_difference += abs(difference)
if abs(difference) > tolerance:
how_many_different += 1
mean_difference = total_difference / value_count
mean_abs_difference = total_abs_difference / value_count
proportion_different = (how_many_different * 1.0) / value_count
if how_many_different == 0:
return True
else:
tf_logging.info("Tensors have {0} different values ({1}%), with mean"
" difference {2} and mean absolute difference {3}".format(
how_many_different, proportion_different * 100,
mean_difference, mean_abs_difference))
return False
def get_top_value(input_values):
max_value = None
max_index = None
for index, value in enumerate(input_values.flatten()):
    if max_value is None or value > max_value:
max_value = value
max_index = index
return max_index, max_value
def test_graph(float_graph_def, input_map, output_names, log_graph=False):
"""Runs the float graph through the rewriter and tests the results."""
float_results = run_graph_def(
float_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
# TODO(petewarden): round test is currently failing because there is no
# RoundToSteps op available.
# round_rewriter = quantize_graph.GraphRewriter(float_graph_def, "round")
# round_graph_def = round_rewriter.rewrite(output_name)
# round_results = run_graph_def(round_graph_def, input_map,
# [output_name + ":0"])
# assert are_tensors_near(expected, round_results[0], 1.0)
#
# TODO(petewarden): Add test for "quantize" mode.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite(output_names)
eightbit_results = run_graph_def(
eightbit_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
for expected, result in zip(float_results, eightbit_results):
assert are_tensors_near(expected, result, 1.0)
if log_graph:
tf_logging.info("8bit:\n%s", str(eightbit_graph_def))
# Test the weights_rounded mode. This uses the default bit_depth.
weights_rounded_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "weights_rounded", quantized_input_range=None)
weights_rounded_graph_def = weights_rounded_rewriter.rewrite(output_names)
weights_rounded_results = run_graph_def(
weights_rounded_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
for expected, result in zip(float_results, weights_rounded_results):
assert are_tensors_near(expected, result, 1.0)
class QuantizeGraphTest(test.TestCase):
def test_negative_const_problem(self):
shape_constant_name = "shape_constant"
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=-0.8, dtype=dtypes.float32, shape=[1])
quantization_result = quantize_graph.quantize_weight_eightbit(
shape_constant, b"MIN_COMBINED")
self.assertEqual(4, len(quantization_result))
def test_odd_padding_problem(self):
"""Tests one error case we ran into in a real graph."""
test_conv(1, 4, 4, 1, 3, 1, 2, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
[1, 2, 3, 4, 5, 6, 7, 8, 9])
def test_mat_mul_tiny(self):
    # These tests are added to test the degenerate case where
    # min(matrix) == max(matrix), which used to cause problems.
test_mat_mul(1, 1, 1, [2], [3])
test_mat_mul(1, 2, 1, [1], [2, 3])
test_mat_mul(1, 1, 2, [1, 1], [1, 1])
test_mat_mul(1, 1, 2, [0, 0], [1, 1])
# The general case.
test_mat_mul(1, 1, 2, [1, 2], [1, 2])
def test_mat_mul_small(self):
test_mat_mul(2, 4, 3, [1, 2, 3, 4, 5, 6],
[7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
def test_conv(self):
test_conv(1, 4, 3, 1, 3, 1, 1, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[1, 4, 7, 2, 5, 8, 3, 6, 9])
def test_reshape(self):
"""Tests that MatMul->Reshape->MatMul avoids extra quantize/dequantize."""
def make_matmul(name, a, b):
n = quantize_graph.create_node("MatMul", name, [a.name, b.name])
quantize_graph.set_attr_dtype(n, "T", dtypes.float32)
quantize_graph.set_attr_bool(n, "transpose_a", False)
quantize_graph.set_attr_bool(n, "transpose_b", False)
return n
# matmul_1 = input*weight_1
input_node = quantize_graph.create_constant_node(
"input", value=[0, 1, 2, 3], dtype=dtypes.float32, shape=[4, 1])
weight_1_node = quantize_graph.create_constant_node(
"weight_1",
value=[.5, .6, .7, .8, .9],
dtype=dtypes.float32,
shape=[1, 5])
matmul_1_node = make_matmul("matmul_1", input_node, weight_1_node)
# Reshape 4x5 to 10x2.
new_shape_node = quantize_graph.create_constant_node(
"new_shape_node", value=[10, 2], dtype=dtypes.int32, shape=[2])
reshape_node = quantize_graph.create_node(
"Reshape", "reshape", [matmul_1_node.name, new_shape_node.name])
quantize_graph.set_attr_dtype(reshape_node, "T", dtypes.float32)
# matmul_2_node = reshape*weight_2
weight_2_node = quantize_graph.create_constant_node(
"weight_2", value=[1.5, 2.5], dtype=dtypes.float32, shape=[2, 1])
matmul_2_node = make_matmul("matmul_2", reshape_node, weight_2_node)
g = graph_pb2.GraphDef()
g.node.extend([
input_node, weight_1_node, matmul_1_node, new_shape_node, reshape_node,
weight_2_node, matmul_2_node
])
# Test the graph
test_graph(g, {}, ["matmul_2"])
# Verify there is only one Quantize and one Requantize op.
eightbit_rewriter = quantize_graph.GraphRewriter(
g, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite(["matmul_2"])
ops = [node.op for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(1, ops.count("QuantizedReshape"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
def test_quantize_array(self):
    # Test invalid parameters (empty array, or 0 buckets).
self.assertRaises(ValueError, quantize_graph.quantize_array, np.array([]),
2)
self.assertRaises(ValueError, quantize_graph.quantize_array,
np.array([1, 2]), 0)
# Test input array of length 1.
arr = np.array([1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertEqual(arr, qarr)
qarr = quantize_graph.quantize_array(arr, 2)
self.assertEqual(arr, qarr)
# Test input array with all elements equal.
arr = np.array([1, 1, 1])
qarr = quantize_graph.quantize_array(arr, 10)
self.assertTrue((np.array([1, 1, 1]) == qarr).all())
# Test "normal" input arrays.
arr = np.array([0, 0.3, 0.6, 1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertTrue((np.array([0.5, 0.5, 0.5, 0.5]) == qarr).all())
qarr = quantize_graph.quantize_array(arr, 2)
self.assertTrue((np.array([0.25, 0.25, 0.75, 0.75]) == qarr).all())
qarr = quantize_graph.quantize_array(arr.reshape((2, 2)), 2)
self.assertTrue((np.array([[0.25, 0.25], [0.75, 0.75]]) == qarr).all())
def test_non_float_concat(self):
concat_dim = quantize_graph.create_constant_node(
"concat_dim", value=0, dtype=dtypes.int32, shape=[])
a = quantize_graph.create_constant_node(
"a",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.int32,
shape=[2, 2, 3])
b = quantize_graph.create_constant_node(
"b",
value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
dtype=dtypes.int32,
shape=[2, 2, 3])
concat = quantize_graph.create_node("Concat", "concat",
[concat_dim.name, a.name, b.name])
quantize_graph.set_attr_int(concat, "N", 2)
quantize_graph.set_attr_dtype(concat, "T", dtypes.int32)
g = graph_pb2.GraphDef()
g.node.extend([concat_dim, a, b, concat])
test_graph(g, {}, [concat.name])
def test_non_float_reshape(self):
a = quantize_graph.create_constant_node(
"a",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.int32,
shape=[2, 2, 3])
shape = quantize_graph.create_constant_node(
"shape", value=[12], dtype=dtypes.int32, shape=[1])
reshape = quantize_graph.create_node("Reshape", "reshape",
[a.name, shape.name])
quantize_graph.set_attr_dtype(reshape, "T", dtypes.int32)
g = graph_pb2.GraphDef()
g.node.extend([a, shape, reshape])
test_graph(g, {}, [reshape.name])
def test_concat(self):
shape_constant_name = "shape_constant"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
concat_name = "concat"
float_graph_def = graph_pb2.GraphDef()
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=0, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([shape_constant])
a_constant = quantize_graph.create_constant_node(
a_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(
b_constant_name,
value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([b_constant])
concat_node = quantize_graph.create_node(
"Concat", concat_name,
[shape_constant_name, a_constant_name, b_constant_name])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
# Verify the concat is quantized.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite([concat_name])
ops = [node.op for node in eightbit_graph_def.node]
self.assertEqual(1, ops.count("QuantizedConcat"))
def test_multiple_outputs(self):
input_constant_name = "input_constant"
split_constant_name = "split_constant"
split_name = "split"
concat_constant_name = "concat_constant"
concat_name = "concat"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
split_constant = quantize_graph.create_constant_node(
split_constant_name, value=1, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([split_constant])
split_node = quantize_graph.create_node(
"Split", split_name, [split_constant_name, input_constant_name])
quantize_graph.set_attr_int(split_node, "num_split", 2)
quantize_graph.set_attr_dtype(split_node, "T", dtypes.float32)
float_graph_def.node.extend([split_node])
concat_constant = quantize_graph.create_constant_node(
concat_constant_name, value=1, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([concat_constant])
concat_node = quantize_graph.create_node(
"Concat", concat_name,
[concat_constant_name, split_name + ":0", split_name + ":1"])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
def test_node_name_from_input(self):
self.assertEqual("SomeName",
quantize_graph.node_name_from_input("^SomeName:2"))
def test_unique_node_name_from_input(self):
self.assertEqual("__hat__SomeName__port__2",
quantize_graph.unique_node_name_from_input("^SomeName:2"))
def test_identity(self):
input_constant_name = "input_constant"
identity_name = "identity"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
identity_node = quantize_graph.create_node("Identity", identity_name,
[input_constant_name])
quantize_graph.set_attr_dtype(identity_node, "T", dtypes.float32)
float_graph_def.node.extend([identity_node])
mul_name = "mul"
mul_node = quantize_graph.create_node("Mul", mul_name,
[identity_name, identity_name])
quantize_graph.set_attr_dtype(mul_node, "T", dtypes.float32)
float_graph_def.node.extend([mul_node])
test_graph(float_graph_def, {}, [mul_name])
def test_keep_control_edges(self):
no_op_name = "no_op"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = graph_pb2.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
graph_def.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = quantize_graph.create_node("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = quantize_graph.create_node(
"Identity", a_identity_name,
[a_constant_name, "^" + a_check_name, "^" + no_op_name])
graph_def.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = quantize_graph.create_node("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = quantize_graph.create_node(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name, b_identity_name])
quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
expected_output = graph_pb2.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
expected_output.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
a_identity_node = quantize_graph.create_node(
"Identity", a_identity_name, [a_constant_name, "^" + no_op_name])
expected_output.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name, b_constant_name])
quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
expected_output.versions.CopyFrom(graph_def.versions)
expected_output.library.CopyFrom(graph_def.library)
output = graph_util.remove_training_nodes(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [add_name])
self.assertProtoEquals(expected_output, stripped_output)
def test_batch_norm(self):
input_constant_name = "input_constant"
mean_constant_name = "mean_constant"
variance_constant_name = "variance_constant"
beta_constant_name = "beta_constant"
gamma_constant_name = "gamma_constant"
batch_norm_name = "batch_norm"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6],
dtype=dtypes.float32,
shape=[1, 1, 6, 2])
float_graph_def.node.extend([input_constant])
mean_constant = quantize_graph.create_constant_node(
mean_constant_name, value=[10, 20], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([mean_constant])
variance_constant = quantize_graph.create_constant_node(
variance_constant_name,
value=[0.25, 0.5],
dtype=dtypes.float32,
shape=[2])
float_graph_def.node.extend([variance_constant])
beta_constant = quantize_graph.create_constant_node(
beta_constant_name, value=[0.1, 0.6], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([beta_constant])
gamma_constant = quantize_graph.create_constant_node(
gamma_constant_name, value=[0, 0], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([gamma_constant])
batch_norm_node = quantize_graph.create_node(
"BatchNormWithGlobalNormalization", batch_norm_name, [
input_constant_name, mean_constant_name, variance_constant_name,
beta_constant_name, gamma_constant_name
])
quantize_graph.set_attr_dtype(batch_norm_node, "T", dtypes.float32)
quantize_graph.set_attr_bool(batch_norm_node, "scale_after_normalization",
False)
quantize_graph.set_attr_float(batch_norm_node, "variance_epsilon", 0.001)
float_graph_def.node.extend([batch_norm_node])
test_graph(float_graph_def, {}, [batch_norm_name])
def test_max_pool(self):
input_constant_name = "input_constant"
max_pool_name = "max_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
max_pool_node = quantize_graph.create_node("MaxPool", max_pool_name,
[input_constant_name])
quantize_graph.set_attr_int_list(max_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(max_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(max_pool_node, "padding", b"SAME")
float_graph_def.node.extend([max_pool_node])
test_graph(float_graph_def, {}, [max_pool_name])
def test_avg_pool(self):
input_constant_name = "input_constant"
avg_pool_name = "avg_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
avg_pool_node = quantize_graph.create_node("AvgPool", avg_pool_name,
[input_constant_name])
quantize_graph.set_attr_dtype(avg_pool_node, "T", dtypes.float32)
quantize_graph.set_attr_int_list(avg_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(avg_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(avg_pool_node, "padding", b"SAME")
float_graph_def.node.extend([avg_pool_node])
test_graph(float_graph_def, {}, [avg_pool_name])
def test_relu(self):
input_constant_name = "input_constant"
relu_name = "relu"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu_node = quantize_graph.create_node("Relu", relu_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
float_graph_def.node.extend([relu_node])
test_graph(float_graph_def, {}, [relu_name])
def test_relu_w_fake_quant_w_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
relu_node = quantize_graph.create_node("Relu", "relu", [input_node.name])
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
min_node = quantize_graph.create_constant_node(
"min_bias_add", value=0, dtype=dtypes.float32, shape=[])
max_node = quantize_graph.create_constant_node(
"max_bias_add", value=12, dtype=dtypes.float32, shape=[])
fake_quant_node = quantize_graph.create_node(
"FakeQuantWithMinMaxVars", "fake_quant",
[relu_node.name, min_node.name, max_node.name])
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend(
[input_node, relu_node, min_node, max_node, fake_quant_node])
test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)
    # Verify the rewritten graph needs no explicit Quantize op and ends with
    # a single Dequantize.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])
ops = [node.op for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
def test_relu6(self):
input_constant_name = "input_constant"
relu6_name = "relu6"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu6_node = quantize_graph.create_node("Relu6", relu6_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu6_node, "T", dtypes.float32)
float_graph_def.node.extend([relu6_node])
test_graph(float_graph_def, {}, [relu6_name])
def test_bias_add(self):
input_constant_name = "input_constant"
offset_constant_name = "offset_constant"
bias_add_name = "bias_add"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 1, 2, 6])
float_graph_def.node.extend([input_constant])
offset_constant = quantize_graph.create_constant_node(
offset_constant_name,
value=[1, 2, 3, 4, 5, 6],
dtype=dtypes.float32,
shape=[6])
float_graph_def.node.extend([offset_constant])
bias_add_node = quantize_graph.create_node(
"BiasAdd", bias_add_name, [input_constant_name, offset_constant_name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def.node.extend([bias_add_node])
test_graph(float_graph_def, {}, [bias_add_name])
def test_quantized_input_range_errors(self):
with self.assertRaises(ValueError):
# Invalid mode.
quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "weights_rounded",
[0, 1])
with self.assertRaises(ValueError):
# Invalid range.
quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "eightbit", [0, -1])
def test_quantized_input_range_bias_add(self):
input_shape = [1, 1, 2, 6]
input_n = quantize_graph.create_node("Placeholder", "input", [])
quantize_graph.set_attr_dtype(input_n, "dtype", dtypes.float32)
quantize_graph.set_attr_shape(input_n, "shape", input_shape)
offset_n = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5, 6], dtype=dtypes.float32, shape=[6])
bias_add_n = quantize_graph.create_node("BiasAdd", "bias_add",
[input_n.name, offset_n.name])
quantize_graph.set_attr_dtype(bias_add_n, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([input_n, offset_n, bias_add_n])
input_map = {
input_n.name + ":0":
np.reshape([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], input_shape)
}
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[bias_add_n.name], [-1, 20.])
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[bias_add_n.name], [0, 12.])
def test_quantized_input_range_mat_mul(self):
shapes = [[3, 2], [2, 4]]
inputs = []
for i, shape in enumerate(shapes):
node = quantize_graph.create_node("Placeholder", "input_%s" % i, [])
quantize_graph.set_attr_dtype(node, "dtype", dtypes.float32)
quantize_graph.set_attr_shape(node, "shape", shape)
inputs.append(node)
mat_mul_node = quantize_graph.create_node("MatMul", "mat_mul",
[n.name for n in inputs])
quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend(inputs + [mat_mul_node])
input_map = {
inputs[0].name + ":0":
np.reshape([1, 2, 3, 4, 5, 6], shapes[0]),
inputs[1].name + ":0":
np.reshape([.8, .7, .6, .5, .4, .3, .2, .1], shapes[1])
}
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[mat_mul_node.name], [-1, 20.])
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[mat_mul_node.name], [0, 6.])
def _RunTestsForQuantizedInputRange(self, float_graph_def, input_map,
output_names, input_range):
if sys.version_info[0] == 3:
# uint8->quint8 conversion for numpy is not working currently.
return
quantized_input_map = {}
for k, v in input_map.items():
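      # Map each float to a uint8 code: (x - min) * 255 / (max - min),
      # so input_range[0] becomes 0 and input_range[1] becomes 255.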
arr = [
int(
round((n - input_range[0]) * 255 / (input_range[1] - input_range[
0]))) for n in v.flat
]
arr = np.array(arr, np.uint8)
arr = arr.reshape(v.shape)
arr = arr.astype(dtypes.quint8.as_numpy_dtype)
quantized_input_map[k] = arr
output_tensors = [output_name + ":0" for output_name in output_names]
float_results = run_graph_def(float_graph_def, input_map, output_tensors)
# Quantize treating the input as quantized in range <input_range>.
rewriter = quantize_graph.GraphRewriter(float_graph_def, "eightbit",
input_range)
graph_def = rewriter.rewrite(output_names)
results = run_graph_def(graph_def, quantized_input_map, output_tensors)
for expected, result in zip(float_results, results):
assert are_tensors_near(expected, result, .5)
ops = [node.op for node in graph_def.node]
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(len(output_names), ops.count("Dequantize"))
# Quantize without treating input as quantized.
rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
graph_def = rewriter.rewrite(output_names)
results = run_graph_def(graph_def, input_map, output_tensors)
for expected, result in zip(float_results, results):
assert are_tensors_near(expected, result, .5)
ops = [node.op for node in graph_def.node]
self.assertEqual(
len(input_map), ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(len(output_names), ops.count("Dequantize"))
def test_bias_add_w_fake_quant_w_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dtype=dtypes.float32,
shape=[1, 1, 2, 5])
offset_node = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
bias_add_node = quantize_graph.create_node(
"BiasAdd", "bias_add", [input_node.name, offset_node.name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
min_node = quantize_graph.create_constant_node(
"min_bias_add", value=-.5, dtype=dtypes.float32, shape=[])
max_node = quantize_graph.create_constant_node(
"max_bias_add", value=15.5, dtype=dtypes.float32, shape=[])
fake_quant_node = quantize_graph.create_node(
"FakeQuantWithMinMaxVars", "fake_quant",
[bias_add_node.name, min_node.name, max_node.name])
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([
input_node, offset_node, bias_add_node, min_node, max_node,
fake_quant_node
])
test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)
    # Verify the rewritten graph needs no explicit Quantize op and ends with
    # a single Dequantize.
# Pass in fallback_quantization_range, although it will have no effect
# because the FakeQuantWithMinMaxVars are used instead.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def,
"eightbit",
quantized_input_range=None,
fallback_quantization_range=[-100, 100])
eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])
ops = [node.op for node in eightbit_graph_def.node]
node_names = [node.name for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
# The fallback constants are not in the graph.
self.assertEqual(0, node_names.count("fallback_quantization_min_value"))
self.assertEqual(0, node_names.count("fallback_quantization_max_value"))
def test_bias_add_w_fallback_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dtype=dtypes.float32,
shape=[1, 1, 2, 5])
offset_node = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
bias_add_node = quantize_graph.create_node(
"BiasAdd", "bias_add", [input_node.name, offset_node.name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([input_node, offset_node, bias_add_node])
test_graph(float_graph_def, {}, [bias_add_node.name], log_graph=True)
    # Verify the rewritten graph needs no explicit Quantize op, ends with a
    # single Dequantize, and contains no RequantizationRange op.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def,
"eightbit",
quantized_input_range=None,
fallback_quantization_range=[-.5, 15.5])
eightbit_graph_def = eightbit_rewriter.rewrite([bias_add_node.name])
ops = [node.op for node in eightbit_graph_def.node]
node_names = [node.name for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
# No RequantizationRange
self.assertEqual(0, ops.count("RequantizationRange"))
# The fallback constants are in the graph.
self.assertEqual(1, node_names.count("fallback_quantization_min_value"))
self.assertEqual(1, node_names.count("fallback_quantization_max_value"))
def test_remove_redundant_quantization(self):
a_constant_name = "a_constant"
a_constant_min_name = "a_constant_min"
a_constant_max_name = "a_constant_max"
a_dequantize_name = "a_dequantize"
a_quantize_name = "a_quantize"
b_constant_name = "b_constant"
b_constant_min_name = "b_constant_min"
b_constant_max_name = "b_constant_max"
b_dequantize_name = "b_dequantize"
b_quantize_name = "b_quantize"
mat_mul_name = "mat_mul"
graph_def = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
graph_def.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(
a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(
a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant_max])
a_dequantize_node = quantize_graph.create_node(
"Dequantize", a_dequantize_name,
[a_constant_name, a_constant_min_name, a_constant_max_name])
quantize_graph.set_attr_dtype(a_dequantize_node, "T", dtypes.uint8)
graph_def.node.extend([a_dequantize_node])
a_quantize_node = quantize_graph.create_node(
"QuantizeV2", a_quantize_name,
[a_dequantize_name, a_dequantize_name + ":1", a_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(a_quantize_node, "T", dtypes.uint8)
graph_def.node.extend([a_quantize_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
graph_def.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(
b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(
b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant_max])
b_dequantize_node = quantize_graph.create_node(
"Dequantize", b_dequantize_name,
[b_constant_name, b_constant_min_name, b_constant_max_name])
quantize_graph.set_attr_dtype(b_dequantize_node, "T", dtypes.uint8)
graph_def.node.extend([b_dequantize_node])
b_quantize_node = quantize_graph.create_node(
"QuantizeV2", b_quantize_name,
[b_dequantize_name, b_dequantize_name + ":1", b_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(b_quantize_node, "T", dtypes.uint8)
graph_def.node.extend([b_quantize_node])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
a_quantize_name, b_quantize_name, a_quantize_name + ":1",
a_quantize_name + ":2", b_quantize_name + ":1", b_quantize_name + ":2"
])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
graph_def.node.extend([mat_mul_node])
expected_output = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
expected_output.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(
a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(
a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant_max])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
expected_output.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(
b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(
b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant_max])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
a_constant_name, b_constant_name, a_constant_min_name,
a_constant_max_name, b_constant_min_name, b_constant_max_name
])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
expected_output.node.extend([mat_mul_node])
expected_output.versions.CopyFrom(graph_def.versions)
expected_output.library.CopyFrom(graph_def.library)
rewriter = quantize_graph.GraphRewriter(
graph_def, [mat_mul_name], quantized_input_range=None)
output = rewriter.remove_redundant_quantization(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [mat_mul_name])
self.assertProtoEquals(expected_output, stripped_output)
if __name__ == "__main__":
test.main()
| xodus7/tensorflow | tensorflow/tools/quantization/quantize_graph_test.py | Python | apache-2.0 | 42,452 |
#!/usr/bin/env python
'''
Disclaimer - This is a solution to the below problem given the content we have
discussed in class. It is not necessarily the best solution to the problem.
In other words, I only use things we have covered up to this point in the class
(with some minor exceptions).
Python for Network Engineers
https://pynet.twb-tech.com
Learning Python
Class#3
I. Create an IP address converter (dotted decimal to binary). This will be
similar to what we did in class2 except:
A. Make the IP address a command-line argument instead of prompting the user
for it.
./binary_converter.py 10.88.17.23
B. Simplify the script logic by using the flow-control statements that we
learned in this class.
C. Zero-pad the digits such that the binary output is always 8-binary digits
long. Strip off the leading '0b' characters. For example,
OLD: 0b1010
NEW: 00001010
D. Print to standard output using a dotted binary format. For example,
IP address Binary
10.88.17.23 00001010.01011000.00010001.00010111
Note, you will probably need to use a 'while' loop and a 'break' statement
for part C.
while True:
...
break # on some condition (exit the while loop)
Python will execute this loop again and again until the 'break' is encountered.
'''
import sys
if len(sys.argv) != 2:
# Exit the script
sys.exit("Usage: ./ex1_binary_converter.py <ip_address>")
ip_addr = sys.argv.pop()
octets = ip_addr.split(".")
# create a blank list (needed because I use .append() method below)
ip_addr_bin = []
if len(octets) == 4:
for octet in octets:
bin_octet = bin(int(octet))
# strip off '0b' from front of string (you can slice a string also)
bin_octet = bin_octet[2:]
# prepend '0' to number until 8 chars long
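        # (bin_octet.zfill(8) or format(int(octet), '08b') would do this in
        # one step, but the exercise asks for a while loop)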
while True:
if len(bin_octet) >= 8:
break
bin_octet = '0' + bin_octet
# add octet to new list
ip_addr_bin.append(bin_octet)
# join binary number in dotted-binary format
ip_addr_bin = ".".join(ip_addr_bin)
# print the output
print "\n%-15s %-45s" % ("IP address", "Binary")
print "%-15s %-45s\n\n" % (ip_addr, ip_addr_bin)
else:
sys.exit("Invalid IP address entered")
| Collisio-Adolebitque/pfne-2017 | pynet/learnpy_ecourse/class3/ex1_binary_converter.py | Python | gpl-3.0 | 2,354 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class taNeaItem(scrapy.Item):
titleArticle = scrapy.Field()
linkArticle = scrapy.Field()
textArticle = scrapy.Field()
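# A minimal usage sketch from inside a spider callback (illustrative only --
# the CSS selectors below are assumptions, not part of this project):
#
#   item = taNeaItem()
#   item['titleArticle'] = response.css('h1::text').extract_first()
#   item['linkArticle'] = response.url
#   item['textArticle'] = ' '.join(response.css('p::text').extract())
#   yield item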
| bamartos/homesearch | source/items.py | Python | gpl-2.0 | 298 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014-2015 Université Catholique de Louvain.
#
# This file is part of INGInious.
#
# INGInious is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INGInious is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with INGInious. If not, see <http://www.gnu.org/licenses/>.
""" Manages submissions """
import base64
from datetime import datetime
import json
from bson.objectid import ObjectId
import pymongo
from common.courses import Course
from frontend.backend_interface import get_job_manager
from frontend.base import get_database, get_gridfs
from frontend.parsable_text import ParsableText
from frontend.plugins.plugin_manager import PluginManager
import frontend.user as User
from frontend.user_data import UserData
def get_submission(submissionid, user_check=True):
""" Get a submission from the database """
sub = get_database().submissions.find_one({'_id': ObjectId(submissionid)})
if user_check and not user_is_submission_owner(sub):
return None
return sub
def get_submission_from_jobid(jobid):
""" Get a waiting submission from its jobid """
return get_database().submissions.find_one({'jobid': jobid})
def job_done_callback(jobid, task, job):
""" Callback called by JobManager when a job is done. Updates the submission in the database with the data returned after the completion of the job """
submission = get_submission_from_jobid(jobid)
submission = get_input_from_submission(submission)
job = _parse_text(task, job)
data = {
"status": ("done" if job["result"] == "success" or job["result"] == "failed" else "error"), # error only if error was made by INGInious
"result": job["result"],
"grade": job["grade"],
"text": job.get("text", None),
"tests": job.get("tests", None),
"problems": (job["problems"] if "problems" in job else {}),
"archive": (get_gridfs().put(base64.b64decode(job["archive"])) if "archive" in job else None)
}
# Store additional data
dont_dump = ["task", "course", "input"]
for index in job:
if index not in data and index not in dont_dump:
data[index] = job[index]
# Save submission to database
get_database().submissions.update(
{"_id": submission["_id"]},
{
"$unset": {"jobid": ""},
"$set": data
}
)
if "group" in submission:
group = get_database().groups.find_one({"_id": submission["group"]})
for username in group["users"]:
UserData(username).update_stats(submission, job)
else:
UserData(submission["username"]).update_stats(submission, job)
PluginManager.get_instance().call_hook("submission_done", submission=submission, job=job)
def add_job(task, inputdata, debug=False):
    """ Adds a job to the queue and returns a submission id.
task is a Task instance and inputdata is the input as a dictionary
If debug is true, more debug data will be saved
"""
if not User.is_logged_in():
raise Exception("A user must be logged in to submit an object")
username = User.get_username()
jobid = get_job_manager().new_job_id()
obj = {
"courseid": task.get_course_id(),
"taskid": task.get_id(),
"input": get_gridfs().put(
json.dumps(inputdata)),
"status": "waiting",
"jobid": jobid,
"submitted_on": datetime.now()}
if Course.get_course_descriptor_content(task.get_course_id()).get("groups", False):
group = get_database().groups.find_one({"course_id": task.get_course_id(), "users": username})
obj.update({"group": group["_id"]})
else:
obj.update({"username": username})
submissionid = get_database().submissions.insert(obj)
PluginManager.get_instance().call_hook("new_submission", submissionid=submissionid, submission=obj, jobid=jobid, inputdata=inputdata)
get_job_manager().new_job(task, inputdata, job_done_callback, "Frontend - {}".format(username), jobid, debug)
return submissionid
def get_input_from_submission(submission, only_input=False):
    """ Get the input of a submission. If only_input is False, returns the full submission with a dictionary object at the key "input". Else, returns only the dictionary. """
if isinstance(submission.get("input", {}), dict):
if only_input:
return submission.get("input", {})
else:
return submission
else:
inp = json.load(get_gridfs().get(submission['input']))
if only_input:
return inp
else:
submission["input"] = inp
return submission
def is_running(submissionid, user_check=True):
""" Tells if a submission is running/in queue """
submission = get_submission(submissionid, user_check)
return submission["status"] == "waiting"
def is_done(submissionid, user_check=True):
""" Tells if a submission is done and its result is available """
submission = get_submission(submissionid, user_check)
return submission["status"] == "done" or submission["status"] == "error"
def user_is_submission_owner(submission):
    """ Returns true if the current user is the owner of this jobid, false otherwise """
if not User.is_logged_in():
raise Exception("A user must be logged in to verify if he owns a jobid")
if "group" in submission:
return get_database().groups.find({"_id": submission["group"], "users": User.get_username()}).count() > 0
else:
return submission["username"] == User.get_username()
def get_user_submissions(task):
""" Get all the user's submissions for a given task """
if not User.is_logged_in():
raise Exception("A user must be logged in to get his submissions")
if Course.get_course_descriptor_content(task.get_course_id()).get("groups", False):
group = get_database().groups.find_one({"course_id": task.get_course_id(), "users": User.get_username()})
cursor = get_database().submissions.find({"group": group["_id"], "taskid": task.get_id(), "courseid": task.get_course_id()})
else:
cursor = get_database().submissions.find({"username": User.get_username(), "taskid": task.get_id(), "courseid": task.get_course_id()})
cursor.sort([("submitted_on", -1)])
return list(cursor)
def get_user_last_submissions(query, limit, one_per_task=False):
""" Get last submissions of a user """
if not User.is_logged_in():
raise Exception("A user must be logged in to get his submissions")
request = query.copy()
request.update({"$or": [
{"username": User.get_username()},
{"group": {"$in": [g["_id"] for g in get_database().groups.find({"users": User.get_username()})]}}]})
    # We only want the last x tasks tried, so modify the request accordingly
if one_per_task is True:
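        # Keep only the newest submission per (courseid, taskid): sort
        # newest-first, group by task keeping the first _id seen, then sort
        # the groups again and apply the limit.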
data = get_database().submissions.aggregate([
{"$match": request},
{"$sort": {"submitted_on": pymongo.DESCENDING}},
{"$group": {"_id": {"courseid": "$courseid", "taskid": "$taskid"}, "orig_id": {"$first": "$_id"},
"submitted_on": {"$first": "$submitted_on"}}},
{"$sort": {"submitted_on": pymongo.DESCENDING}},
{"$limit": limit}
])
request = {"_id": {"$in": [d["orig_id"] for d in list(data)]}}
cursor = get_database().submissions.find(request)
cursor.sort([("submitted_on", -1)]).limit(limit)
return list(cursor)
def _parse_text(task, job_result):
""" Parses text """
if "text" in job_result:
job_result["text"] = ParsableText(job_result["text"], task.get_response_type()).parse()
if "problems" in job_result:
for problem in job_result["problems"]:
job_result["problems"][problem] = ParsableText(job_result["problems"][problem], task.get_response_type()).parse()
return job_result
| layus/INGInious | frontend/submission_manager.py | Python | agpl-3.0 | 8,388 |
def computepay(hours,rate):
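    # Straight time up to 40 hours; hours beyond 40 are paid at 1.5x the rate.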
if hours <= 40.0:
return rate * hours
else:
regular = 40.0 * rate
ot = (hours-40.0)*(rate*1.5)
#return (rate * hours) + ((hours-40.0)*(rate*1.5))
return regular + ot
print(computepay(float(input("Enter Hours: ")),float(input("Enter Rate: "))))
| rlmitchell/coursera | py4e/1_python_for_everybody/ex-4-6.py | Python | gpl-3.0 | 299 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import threading
import logging
import authorizer
import abstracted_fs
import ftpserver
import openerp
from openerp.tools import config
_logger = logging.getLogger(__name__)
def start_server():
if openerp.multi_process:
_logger.info("FTP disabled in multiprocess mode")
return
if openerp.evented:
_logger.info("FTP disabled in evented mode")
return
HOST = config.get('ftp_server_host', '127.0.0.1')
PORT = int(config.get('ftp_server_port', '8021'))
PASSIVE_PORTS = None
pps = config.get('ftp_server_passive_ports', '').split(':')
if len(pps) == 2:
PASSIVE_PORTS = int(pps[0]), int(pps[1])
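    # The values above come from the server configuration file, e.g.
    # (illustrative values only):
    #   ftp_server_host = 0.0.0.0
    #   ftp_server_port = 8021
    #   ftp_server_passive_ports = 9000:9100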
class ftp_server(threading.Thread):
def run(self):
autho = authorizer.authorizer()
ftpserver.FTPHandler.authorizer = autho
ftpserver.max_cons = 300
ftpserver.max_cons_per_ip = 50
ftpserver.FTPHandler.abstracted_fs = abstracted_fs.abstracted_fs
if PASSIVE_PORTS:
ftpserver.FTPHandler.passive_ports = PASSIVE_PORTS
ftpserver.log = lambda msg: _logger.info(msg)
ftpserver.logline = lambda msg: None
ftpserver.logerror = lambda msg: _logger.error(msg)
ftpd = ftpserver.FTPServer((HOST, PORT), ftpserver.FTPHandler)
ftpd.serve_forever()
if HOST.lower() == 'none':
_logger.info("\n Server FTP Not Started\n")
else:
_logger.info("\n Serving FTP on %s:%s\n" % (HOST, PORT))
ds = ftp_server()
ds.daemon = True
ds.start()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| ttfseiko/openerp-trunk | openerp/addons/document_ftp/ftpserver/__init__.py | Python | agpl-3.0 | 2,649 |
"""
Implements interface to openssl X509 and X509Store structures,
I.e. allows one to load, analyze and verify certificates.
X509Store objects are also used to verify other signed documents,
such as CMS, OCSP and timestamps.
"""
from ctypes import c_void_p, c_long, c_ulong, c_int, POINTER, c_char_p, Structure, cast
from ctypescrypto.bio import Membio
from ctypescrypto.pkey import PKey
from ctypescrypto.oid import Oid
from ctypescrypto.exception import LibCryptoError
from ctypescrypto import libcrypto, pyver, chartype, inttype, bintype
from datetime import datetime, date
import sys
try:
from pytz import utc
except ImportError:
from datetime import timedelta, tzinfo
ZERO = timedelta(0)
class UTC(tzinfo):
"""tzinfo object for UTC.
        Used as a fallback when pytz is not available.
"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
__all__ = ['X509', 'X509Error', 'X509Name', 'X509Store', 'StackOfX509']
if hasattr(libcrypto,"X509_get_version"):
# If it is OpenSSL 1.1 or above, use accessor functions
_X509_get_version = libcrypto.X509_get_version
_X509_get_version.restype = c_long
_X509_get_version.argtypes = (c_void_p,)
_X509_get_notBefore=libcrypto.X509_getm_notBefore
_X509_get_notBefore.restype = c_void_p
_X509_get_notBefore.argtypes = (c_void_p,)
_X509_get_notAfter=libcrypto.X509_getm_notAfter
_X509_get_notAfter.restype = c_void_p
_X509_get_notAfter.argtypes = (c_void_p,)
else:
# Otherwise declare X509 structure internals and define deep poke
# functions
class _validity(Structure):
""" ctypes representation of X509_VAL structure
needed to access certificate validity period, because openssl
        doesn't provide functions for it - only macros
"""
_fields_ = [('notBefore', c_void_p), ('notAfter', c_void_p)]
    class _cinf(Structure):
        """ ctypes representation of X509_CINF structure
        needed to access certificate data, which are accessible only
via macros
"""
_fields_ = [('version', c_void_p),
('serialNumber', c_void_p),
('sign_alg', c_void_p),
('issuer', c_void_p),
('validity', POINTER(_validity)),
('subject', c_void_p),
('pubkey', c_void_p),
('issuerUID', c_void_p),
('subjectUID', c_void_p),
('extensions', c_void_p),
]
class _x509(Structure):
"""
        ctypes representation of X509 structure needed
        to access certificate data which are accessible only via
macros, not functions
"""
_fields_ = [('cert_info', POINTER(_cinf)),
('sig_alg', c_void_p),
('signature', c_void_p),
# There are a lot of parsed extension fields there
]
_px509 = POINTER(_x509)
def _X509_get_version(ptr):
asn1int = cast(ptr, _px509)[0].cert_info[0].version
return libcrypto.ASN1_INTEGER_get(asn1int)
def _X509_get_notBefore(ptr):
# (x)->cert_info->validity->notBefore
return cast(ptr, _px509)[0].cert_info[0].validity[0].notBefore
def _X509_get_notAfter(ptr):
return cast(ptr, _px509)[0].cert_info[0].validity[0].notAfter
if hasattr(libcrypto,'sk_num'):
sk_num = libcrypto.sk_num
sk_set = libcrypto.sk_set
sk_value = libcrypto.sk_value
sk_delete = libcrypto.sk_delete
sk_new_null = libcrypto.sk_new_null
sk_pop_free = libcrypto.sk_pop_free
sk_push = libcrypto.sk_push
else:
sk_num = libcrypto.OPENSSL_sk_num
sk_set = libcrypto.OPENSSL_sk_set
sk_value = libcrypto.OPENSSL_sk_value
sk_delete = libcrypto.OPENSSL_sk_delete
sk_new_null = libcrypto.OPENSSL_sk_new_null
sk_pop_free = libcrypto.OPENSSL_sk_pop_free
sk_push = libcrypto.OPENSSL_sk_push
class X509Error(LibCryptoError):
"""
Exception, generated when some openssl function fail
during X509 operation
"""
pass
class X509Name(object):
"""
Class which represents X.509 distinguished name - typically
a certificate subject name or an issuer name.
    Now used only to represent information extracted from the
    certificate. Potentially it can also be used to build a DN when
    creating a certificate signing request.
"""
# XN_FLAG_SEP_COMMA_PLUS & ASN1_STRFLG_UTF8_CONVERT
PRINT_FLAG = 0x10010
ESC_MSB = 4
def __init__(self, ptr=None, copy=False):
"""
Creates a X509Name object
@param ptr - pointer to X509_NAME C structure (as returned by some
            OpenSSL functions)
        @param copy - indicates that this structure has to be freed upon
object destruction
"""
if ptr is not None:
self.ptr = ptr
self.need_free = copy
self.writable = False
else:
self.ptr = libcrypto.X509_NAME_new()
self.need_free = True
self.writable = True
def __del__(self):
"""
        Frees the underlying structure if necessary
"""
if self.need_free:
libcrypto.X509_NAME_free(self.ptr)
def __bytes__(self):
"""
Produces an ascii representation of the name, escaping all
symbols > 0x80. Probably it is not what you want, unless
your native language is English
"""
bio = Membio()
libcrypto.X509_NAME_print_ex(bio.bio, self.ptr, 0,
self.PRINT_FLAG | self.ESC_MSB)
return bio.__bytes__()
def __unicode__(self):
"""
Produces unicode representation of the name.
"""
bio = Membio()
libcrypto.X509_NAME_print_ex(bio.bio, self.ptr, 0, self.PRINT_FLAG)
return bio.__unicode__()
if pyver == 2:
__str__ = __bytes__
else:
__str__ = __unicode__
def __len__(self):
"""
return number of components in the name
"""
return libcrypto.X509_NAME_entry_count(self.ptr)
def __cmp__(self, other):
"""
Compares X509 names
"""
return libcrypto.X509_NAME_cmp(self.ptr, other.ptr)
def __eq__(self, other):
return libcrypto.X509_NAME_cmp(self.ptr, other.ptr) == 0
def __gt__(self, other):
return libcrypto.X509_NAME_cmp(self.ptr, other.ptr) > 0
def __lt__(self, other):
return libcrypto.X509_NAME_cmp(self.ptr, other.ptr) < 0
def __getitem__(self, key):
if isinstance(key, Oid):
# Return first matching field
idx = libcrypto.X509_NAME_get_index_by_NID(self.ptr, key.nid, -1)
if idx < 0:
                raise KeyError("Key not found " + str(key))
entry = libcrypto.X509_NAME_get_entry(self.ptr, idx)
value = libcrypto.X509_NAME_ENTRY_get_data(entry)
bio = Membio()
libcrypto.ASN1_STRING_print_ex(bio.bio, value, self.PRINT_FLAG)
return chartype(bio)
elif isinstance(key, inttype):
# Return OID, string tuple
entry = libcrypto.X509_NAME_get_entry(self.ptr, key)
if entry is None:
raise IndexError("name entry index out of range")
oid = Oid.fromobj(libcrypto.X509_NAME_ENTRY_get_object(entry))
value = libcrypto.X509_NAME_ENTRY_get_data(entry)
bio = Membio()
libcrypto.ASN1_STRING_print_ex(bio.bio, value, self.PRINT_FLAG)
return (oid, chartype(bio))
else:
raise TypeError("X509 NAME can be indexed by Oids or integers only")
def __setitem__(self, key, val):
if not self.writable:
raise ValueError("Attempt to modify constant X509 object")
else:
raise NotImplementedError
def __delitem__(self, key):
if not self.writable:
raise ValueError("Attempt to modify constant X509 object")
else:
raise NotImplementedError
def __hash__(self):
return libcrypto.X509_NAME_hash(self.ptr)
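# A minimal sketch of X509Name lookups (illustrative only; 'cert' is assumed
# to be an X509 object loaded elsewhere):
#
#   from ctypescrypto.oid import Oid
#   common_name = cert.subject[Oid("commonName")]   # first matching field
#   oid, value = cert.subject[0]                    # (Oid, text) by index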
class _x509_ext(Structure):
    """ Represents C structure X509_EXTENSION """
_fields_ = [("object", c_void_p),
("critical", c_int),
("value", c_void_p)
]
class X509_EXT(object):
""" Python object which represents a certificate extension """
def __init__(self, ptr, copy=False):
""" Initializes from the pointer to X509_EXTENSION.
If copy is True, creates a copy, otherwise just
stores pointer.
"""
if copy:
self.ptr = libcrypto.X509_EXTENSION_dup(ptr)
else:
self.ptr = cast(ptr, POINTER(_x509_ext))
def __del__(self):
libcrypto.X509_EXTENSION_free(self.ptr)
def __bytes__(self):
bio = Membio()
libcrypto.X509V3_EXT_print(bio.bio, self.ptr, 0x20010, 0)
return bintype(bio)
def __unicode__(self):
bio = Membio()
libcrypto.X509V3_EXT_print(bio.bio, self.ptr, 0x20010, 0)
return chartype(bio)
if pyver == 2:
__str__ = __bytes__
else:
__str__ = __unicode__
@property
def oid(self):
"Returns OID of the extension"
return Oid.fromobj(self.ptr[0].object)
@property
    def critical(self):
        "Returns True if the extension has the critical flag set"
return self.ptr[0].critical > 0
class _X509extlist(object):
"""
Represents list of certificate extensions. Really it keeps
reference to certificate object
"""
def __init__(self, cert):
"""
Initialize from X509 object
"""
self.cert = cert
def __len__(self):
"""
Returns number of extensions
"""
return libcrypto.X509_get_ext_count(self.cert.cert)
def __getitem__(self, item):
"""
Returns extension by index, creating a copy
"""
ext_ptr = libcrypto.X509_get_ext(self.cert.cert, item)
if ext_ptr is None:
raise IndexError
return X509_EXT(ext_ptr, True)
def find(self, oid):
"""
Return list of extensions with given Oid
"""
if not isinstance(oid, Oid):
            raise TypeError("Need ctypescrypto.oid.Oid as argument")
found = []
index = -1
end = len(self)
while True:
index = libcrypto.X509_get_ext_by_NID(self.cert.cert, oid.nid,
index)
if index >= end or index < 0:
break
found.append(self[index])
return found
def find_critical(self, crit=True):
"""
        Return list of critical extensions (or list of non-critical ones, if
        the optional second argument is False)
"""
if crit:
flag = 1
else:
flag = 0
found = []
end = len(self)
index = -1
while True:
index = libcrypto.X509_get_ext_by_critical(self.cert.cert, flag,
index)
if index >= end or index < 0:
break
found.append(self[index])
return found
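# Extensions are reached through X509.extensions, e.g. (illustrative only;
# 'cert' is assumed to be an X509 object):
#
#   for ext in cert.extensions:
#       print(ext.oid, ext.critical, str(ext))
#   key_usage = cert.extensions.find(Oid("keyUsage"))

# The leading "_X509" below is deliberate: references to __asn1date_to_datetime
# inside the X509 class are name-mangled to this module-level function.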
def _X509__asn1date_to_datetime(asn1date):
"""
Converts openssl ASN1_TIME object to python datetime.datetime
"""
bio = Membio()
libcrypto.ASN1_TIME_print(bio.bio, asn1date)
pydate = datetime.strptime(str(bio), "%b %d %H:%M:%S %Y %Z")
return pydate.replace(tzinfo=utc)
class X509(object):
"""
Represents X.509 certificate.
"""
def __init__(self, data=None, ptr=None, format="PEM"):
"""
Initializes certificate
@param data - serialized certificate in PEM or DER format.
@param ptr - pointer to X509, returned by some openssl function.
mutually exclusive with data
@param format - specifies data format. "PEM" or "DER", default PEM
"""
if ptr is not None:
if data is not None:
raise TypeError("Cannot use data and ptr simultaneously")
self.cert = ptr
elif data is None:
raise TypeError("data argument is required")
else:
bio = Membio(data)
if format == "PEM":
self.cert = libcrypto.PEM_read_bio_X509(bio.bio, None, None,
None)
else:
self.cert = libcrypto.d2i_X509_bio(bio.bio, None)
if self.cert is None:
raise X509Error("error reading certificate")
self.extensions = _X509extlist(self)
def __del__(self):
"""
Frees certificate object
"""
libcrypto.X509_free(self.cert)
    def __bytes__(self):
        """ Returns DER string of the certificate """
bio = Membio()
if libcrypto.i2d_X509_bio(bio.bio, self.cert) == 0:
raise X509Error("error serializing certificate")
return str(bio)
if pyver == 2:
__str__ = __bytes__
def __repr__(self):
""" Returns valid call to the constructor """
return "X509(data=" + repr(self.pem()) + ",format='PEM')"
@property
    def pubkey(self):
        """EVP PKey object of certificate public key"""
return PKey(ptr=libcrypto.X509_get_pubkey(self.cert, False))
    def pem(self):
        """ Returns PEM representation of the certificate """
bio = Membio()
if libcrypto.PEM_write_bio_X509(bio.bio, self.cert) == 0:
raise X509Error("error serializing certificate")
return str(bio)
def verify(self, store=None, chain=None, key=None):
"""
        Verify self. Supports verification against either an X509 store
        object or just the issuer's public key
@param store X509Store object.
@param chain - list of X509 objects to add into verification
            context. These objects are untrusted, but can be used to
            build a certificate chain up to a trusted object in the store
@param key - PKey object with open key to validate signature
parameters store and key are mutually exclusive. If neither
is specified, attempts to verify self as self-signed certificate
"""
if store is not None and key is not None:
raise X509Error("key and store cannot be specified simultaneously")
if store is not None:
ctx = libcrypto.X509_STORE_CTX_new()
if ctx is None:
raise X509Error("Error allocating X509_STORE_CTX")
if chain is not None and len(chain) > 0:
chain_ptr = StackOfX509(chain).ptr
else:
chain_ptr = None
if libcrypto.X509_STORE_CTX_init(ctx, store.store, self.cert,
chain_ptr) < 0:
raise X509Error("Error allocating X509_STORE_CTX")
res = libcrypto.X509_verify_cert(ctx)
libcrypto.X509_STORE_CTX_free(ctx)
return res > 0
else:
if key is None:
if self.issuer != self.subject:
# Not a self-signed certificate
return False
key = self.pubkey
res = libcrypto.X509_verify(self.cert, key.key)
if res < 0:
raise X509Error("X509_verify failed")
return res > 0
@property
def subject(self):
""" X509Name for certificate subject name """
return X509Name(libcrypto.X509_get_subject_name(self.cert))
@property
def issuer(self):
""" X509Name for certificate issuer name """
return X509Name(libcrypto.X509_get_issuer_name(self.cert))
@property
def serial(self):
""" Serial number of certificate as integer """
asnint = libcrypto.X509_get_serialNumber(self.cert)
bio = Membio()
libcrypto.i2a_ASN1_INTEGER(bio.bio, asnint)
return int(str(bio), 16)
@property
def version(self):
"""
        Certificate version as an integer. The certificate actually stores 0
        for version 1 and 2 for version 3, but we return 1 and 3
"""
return _X509_get_version(self.cert) + 1
@property
def startDate(self):
""" Certificate validity period start date """
asn1 = _X509_get_notBefore(self.cert)
return __asn1date_to_datetime(asn1)
@property
def endDate(self):
""" Certificate validity period end date """
asn1 = _X509_get_notAfter(self.cert)
return __asn1date_to_datetime(asn1)
def check_ca(self):
""" Returns True if certificate is CA certificate """
return libcrypto.X509_check_ca(self.cert) > 0
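# A minimal usage sketch (illustrative only; 'cert.pem' is an assumed path):
#
#   with open("cert.pem", "rb") as f:
#       cert = X509(f.read(), format="PEM")
#   print(cert.subject, cert.serial, cert.version)
#   print(cert.startDate, cert.endDate, cert.check_ca())
#   if cert.subject == cert.issuer:
#       print("self-signed, signature valid:", cert.verify())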
class X509Store(object):
"""
    Represents trusted certificate store. Can be used to look up CA
    certificates during verification
@param file - file with several certificates and crls
to load into store
@param dir - hashed directory with certificates and crls
@param default - if true, default verify location (directory)
is installed
"""
def __init__(self, file=None, dir=None, default=False):
"""
Creates X509 store and installs lookup method. Optionally initializes
by certificates from given file or directory.
"""
#
# Todo - set verification flags
#
self.store = libcrypto.X509_STORE_new()
if self.store is None:
raise X509Error("allocating store")
lookup = libcrypto.X509_STORE_add_lookup(self.store,
libcrypto.X509_LOOKUP_file())
if lookup is None:
raise X509Error("error installing file lookup method")
if file is not None:
if pyver == 2:
fn = file
else:
fn = file.encode(sys.getfilesystemencoding())
if not libcrypto.X509_LOOKUP_ctrl(lookup, 1, fn, 1, None) > 0:
raise X509Error("error loading trusted certs from file "+file)
lookup = libcrypto.X509_STORE_add_lookup(self.store,
libcrypto.X509_LOOKUP_hash_dir())
if lookup is None:
raise X509Error("error installing hashed lookup method")
if dir is not None:
if pyver == 2:
dr = dir
else:
dr = dir.encode(sys.getfilesystemencoding())
if not libcrypto.X509_LOOKUP_ctrl(lookup, 2, dr, 1, None) > 0:
raise X509Error("error adding hashed trusted certs dir "+dir)
if default:
if not libcrypto.X509_LOOKUP_ctrl(lookup, 2, None, 3, None) > 0:
raise X509Error("error adding default trusted certs dir ")
def add_cert(self, cert):
"""
        Explicitly adds a certificate to the store's set of trusted certificates
@param cert - X509 object to add
"""
if not isinstance(cert, X509):
raise TypeError("cert should be X509")
libcrypto.X509_STORE_add_cert(self.store, cert.cert)
def add_callback(self, callback):
"""
Installs callback function, which would receive detailed information
        about verified certificates
"""
raise NotImplementedError
def setflags(self, flags):
"""
Set certificate verification flags.
@param flags - integer bit mask. See OpenSSL X509_V_FLAG_* constants
"""
libcrypto.X509_STORE_set_flags(self.store, flags)
def setpurpose(self, purpose):
"""
Sets certificate purpose which verified certificate should match
        @param purpose - number from 1 to 9 or standard string defined
                         in OpenSSL
        possible strings - sslclient, sslserver, nssslserver, smimesign,
        smimeencrypt, crlsign, any, ocsphelper
"""
if isinstance(purpose, str):
purp_no = libcrypto.X509_PURPOSE_get_by_sname(purpose)
if purp_no <= 0:
raise X509Error("Invalid certificate purpose '%s'" % purpose)
elif isinstance(purpose, int):
purp_no = purpose
if libcrypto.X509_STORE_set_purpose(self.store, purp_no) <= 0:
raise X509Error("cannot set purpose")
def setdepth(self, depth):
"""
Sets the verification depth i.e. max length of certificate chain
which is acceptable
"""
libcrypto.X509_STORE_set_depth(self.store, depth)
def settime(self, time):
"""
        Set the point in time used to check the validity of certificates.
        Time can be either a python datetime object or a number of seconds
        since the epoch
"""
        if isinstance(time, (datetime, date)):
seconds = int(time.strftime("%s"))
elif isinstance(time, int):
seconds = time
else:
raise TypeError("datetime.date, datetime.datetime or integer " +
"is required as time argument")
raise NotImplementedError
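# A minimal sketch of store-based verification (file names and the
# 'intermediate' certificate are assumptions, for illustration only):
#
#   store = X509Store(file="ca-bundle.pem", default=True)
#   store.setpurpose("sslserver")
#   ok = cert.verify(store=store, chain=[intermediate])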
class StackOfX509(object):
"""
Implements OpenSSL STACK_OF(X509) object.
It looks much like python container types
"""
def __init__(self, certs=None, ptr=None, disposable=True):
"""
Create stack
@param certs - list of X509 objects. If specified, read-write
stack is created and populated by these certificates
@param ptr - pointer to OpenSSL STACK_OF(X509) as returned by
some functions
        @param disposable - if True, the stack created from a pointer
            returned by a function is a copy which can be modified and
            needs to be freed. If False, it is just a pointer into
            another structure, i.e. CMS_ContentInfo
"""
self.need_free = False
if ptr is None:
self.need_free = True
self.ptr = sk_new_null()
if certs is not None:
for crt in certs:
self.append(crt)
elif certs is not None:
            raise ValueError("cannot handle certs and ptr simultaneously")
else:
self.need_free = disposable
self.ptr = ptr
def __len__(self):
return sk_num(self.ptr)
def __getitem__(self, index):
if index < 0 or index >= len(self):
raise IndexError
p = sk_value(self.ptr, index)
return X509(ptr=libcrypto.X509_dup(p))
def __setitem__(self, index, value):
if not self.need_free:
raise ValueError("Stack is read-only")
if index < 0 or index >= len(self):
raise IndexError
if not isinstance(value, X509):
raise TypeError('StackOfX509 can contain only X509 objects')
p = sk_value(self.ptr, index)
sk_set(self.ptr, index, libcrypto.X509_dup(value.cert))
libcrypto.X509_free(p)
def __delitem__(self, index):
if not self.need_free:
raise ValueError("Stack is read-only")
if index < 0 or index >= len(self):
raise IndexError
p = sk_delete(self.ptr, index)
libcrypto.X509_free(p)
def __del__(self):
if self.need_free:
sk_pop_free(self.ptr, libcrypto.X509_free)
def append(self, value):
""" Adds certificate to stack """
if not self.need_free:
raise ValueError("Stack is read-only")
if not isinstance(value, X509):
raise TypeError('StackOfX509 can contain only X509 objects')
sk_push(self.ptr, libcrypto.X509_dup(value.cert))
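# Usage sketch for StackOfX509 (the certificate names are illustrative):
#   stack = StackOfX509(certs=[cert_a, cert_b])   # writable stack, freed on GC
#   stack.append(cert_c)
#   first = stack[0]                              # items come back as X509 dups
#   del stack[1]                                  # only valid on writable stacks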
libcrypto.d2i_X509_bio.argtypes = (c_void_p,POINTER(c_void_p))
libcrypto.X509_free.argtypes = (c_void_p,)
libcrypto.X509_dup.restype = c_void_p
libcrypto.X509_dup.argtypes = (c_void_p, )
libcrypto.i2a_ASN1_INTEGER.argtypes = (c_void_p, c_void_p)
libcrypto.ASN1_STRING_print_ex.argtypes = (c_void_p, c_void_p, c_long)
libcrypto.PEM_read_bio_X509.restype = c_void_p
libcrypto.PEM_read_bio_X509.argtypes = (c_void_p, POINTER(c_void_p),
c_void_p, c_void_p)
libcrypto.PEM_write_bio_X509.restype = c_int
libcrypto.PEM_write_bio_X509.argtypes = (c_void_p, c_void_p)
libcrypto.ASN1_TIME_print.argtypes = (c_void_p, c_void_p)
libcrypto.ASN1_INTEGER_get.argtypes = (c_void_p, )
libcrypto.ASN1_INTEGER_get.restype = c_long
libcrypto.X509_check_ca.argtypes = (c_void_p, )
libcrypto.X509_get_serialNumber.argtypes = (c_void_p, )
libcrypto.X509_get_serialNumber.restype = c_void_p
libcrypto.X509_get_subject_name.argtypes = (c_void_p, )
libcrypto.X509_get_subject_name.restype = c_void_p
libcrypto.X509_get_issuer_name.argtypes = (c_void_p, )
libcrypto.X509_get_issuer_name.restype = c_void_p
libcrypto.X509_NAME_ENTRY_get_object.restype = c_void_p
libcrypto.X509_NAME_ENTRY_get_object.argtypes = (c_void_p, )
libcrypto.X509_NAME_ENTRY_get_data.restype = c_void_p
libcrypto.X509_NAME_ENTRY_get_data.argtypes = (c_void_p, )
libcrypto.OBJ_obj2nid.argtypes = (c_void_p, )
libcrypto.X509_NAME_get_entry.restype = c_void_p
libcrypto.X509_NAME_get_entry.argtypes = (c_void_p, c_int)
libcrypto.X509_STORE_new.restype = c_void_p
libcrypto.X509_STORE_add_lookup.restype = c_void_p
libcrypto.X509_STORE_add_lookup.argtypes = (c_void_p, c_void_p)
libcrypto.X509_STORE_add_cert.argtypes = (c_void_p, c_void_p)
libcrypto.X509_STORE_CTX_new.restype = c_void_p
libcrypto.X509_STORE_CTX_free.argtypes = (c_void_p,)
libcrypto.X509_STORE_CTX_init.argtypes = (c_void_p, c_void_p, c_void_p,
c_void_p)
libcrypto.X509_STORE_set_depth.argtypes = (c_void_p, c_int)
libcrypto.X509_STORE_set_flags.argtypes = (c_void_p, c_ulong)
libcrypto.X509_STORE_set_purpose.argtypes = (c_void_p, c_int)
libcrypto.X509_LOOKUP_file.restype = c_void_p
libcrypto.X509_LOOKUP_hash_dir.restype = c_void_p
libcrypto.X509_LOOKUP_ctrl.restype = c_int
libcrypto.X509_LOOKUP_ctrl.argtypes = (c_void_p, c_int, c_char_p, c_long,
POINTER(c_char_p))
libcrypto.X509_EXTENSION_free.argtypes = (c_void_p, )
libcrypto.X509_EXTENSION_dup.argtypes = (c_void_p, )
libcrypto.X509_EXTENSION_dup.restype = POINTER(_x509_ext)
libcrypto.X509V3_EXT_print.argtypes = (c_void_p, POINTER(_x509_ext), c_long,
c_int)
libcrypto.X509_get_ext.restype = c_void_p
libcrypto.X509_get_ext.argtypes = (c_void_p, c_int)
libcrypto.X509_get_ext_by_critical.argtypes = (c_void_p, c_int, c_int)
libcrypto.X509_get_ext_by_NID.argtypes = (c_void_p, c_int, c_int)
libcrypto.X509_get_ext_count.argtypes = (c_void_p, )
libcrypto.X509_get_pubkey.restype = c_void_p
libcrypto.X509_get_pubkey.argtypes = (c_void_p, )
libcrypto.X509V3_EXT_print.argtypes = (c_void_p, POINTER(_x509_ext), c_long,
c_int)
libcrypto.X509_LOOKUP_file.restype = c_void_p
libcrypto.X509_LOOKUP_hash_dir.restype = c_void_p
libcrypto.X509_NAME_cmp.argtypes = (c_void_p, c_void_p)
libcrypto.X509_NAME_entry_count.argtypes = (c_void_p,)
libcrypto.X509_NAME_free.argtypes = (c_void_p,)
libcrypto.X509_NAME_new.restype = c_void_p
libcrypto.X509_NAME_print_ex.argtypes = (c_void_p, c_void_p, c_int, c_ulong)
libcrypto.X509_PURPOSE_get_by_sname.argtypes=(c_char_p,)
libcrypto.X509_verify.argtypes = (c_void_p, c_void_p)
libcrypto.X509_verify_cert.argtypes = (c_void_p,)
sk_num.restype = c_int
sk_num.argtypes= (c_void_p,)
sk_set.argtypes = (c_void_p, c_int, c_void_p)
sk_set.restype = c_void_p
sk_value.argtypes = (c_void_p, c_int)
sk_value.restype = c_void_p
sk_delete.argtypes = (c_void_p, c_int)
sk_delete.restype = c_void_p
sk_new_null.restype = c_void_p
sk_pop_free.argtypes = (c_void_p, c_void_p)
sk_push.argtypes = (c_void_p, c_void_p)
libcrypto.X509_NAME_hash.restype = c_long
libcrypto.X509_NAME_hash.argtypes = (c_void_p, )
libcrypto.X509_NAME_get_index_by_NID.argtypes = (c_void_p, c_int, c_int)
| vbwagner/ctypescrypto | ctypescrypto/x509.py | Python | mit | 28,184 |
"""
hellofriend.py
Author: Jasmine Lou
Credit: the internet and classmates
Assignment:
Write and submit an interactive Python program that asks for the user's name and age,
then prints how much older Python is than the user (based on a simple comparison of
birth year). Python's first public release occurred in 1991. Something like this:
Please tell me your name: Guido
Please tell me your age: 16
Hello, Guido. Python is 8 years older than you are!
Note that the text: "Guido" and "16" are entered by the user running the program.
The final line ("Hello...") is generated dynamically when you run the program, based
on the name and age that the user enters.
"""
name = input("Please tell me your name: ")
age = input("Please tell me your age: ")
s = "Hello, " + name +". " + "Python is {0} years older than you are!"
print(s.format ((2015 - int (age)) - 1991)) | jasminelou/Hello-friend | hellofriend.py | Python | mit | 871 |
from hashlib import sha256 as hash_new
SIZE = 1024 * 1024
def compute_hash(data, size):
"""
compute hash of storage item's data file, return hexdigest
"""
hasher = hash_new()
offset = 0
while offset < size:
buf = data.read(SIZE, offset)
offset += len(buf)
hasher.update(buf)
return hasher.hexdigest()
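# Usage sketch (assuming `data` is a storage item's data object exposing
# read(size, offset) as used above, and `size` is its length in bytes):
#   digest = compute_hash(data, size)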
| bepasty/bepasty-server | src/bepasty/utils/hashing.py | Python | bsd-2-clause | 356 |
import itertools
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.http.response import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.views.generic import DetailView
from django.utils import timezone
from qsic.core.models import QSICPic
from qsic.core.utils import CalendarWeek
from qsic.core.utils import EST
from qsic.events.models import Event
from qsic.events.models import Performance
from qsic.events.utils import build_reoccuring_events
from django.template.defaultfilters import slugify
def tonight(request):
return render_to_response(
'events/tonight.html',
locals(),
context_instance=RequestContext(request)
)
def up_next(request):
# Starting with today, get the next event or 6 performances.
now = timezone.now()
today_date = now.date()
today = timezone.datetime(today_date.year, today_date.month, today_date.day, tzinfo=EST)
build_reoccuring_events(now)
# get all events for cal_week
events = [e for e in Event.objects.all().order_by('_start_dt') if e.start_dt >= today]
# get all performances not in events
performances = Performance.objects.filter(
start_dt__gte=today,
).exclude(
event__in=events
).order_by(
'start_dt'
)
events_and_perofrmances = list(itertools.chain(events, performances))
events_and_perofrmances.sort(key=lambda i: i.start_dt)
if events_and_perofrmances and isinstance(events_and_perofrmances[0], Event):
# just show the one event
up_next_type = 'event'
event = events_and_perofrmances[0]
qsic_pics_qs = QSICPic.objects.filter(event=event)
event_photo = None
if qsic_pics_qs.count():
event_photo = qsic_pics_qs[0]
elif events_and_perofrmances:
        # get all the performances up to the next Event
        # or the next 6 performances, whichever comes first.
up_next_type = 'perfomance'
performance_list = []
for n, o in enumerate(events_and_perofrmances):
performance_list.append(o)
if n >= 6 or o.event:
break
else:
up_next_type = None
qsic_pics = QSICPic.objects.all()
return render_to_response(
'events/up_next.html',
locals(),
context_instance=RequestContext(request)
)
def current_week(request):
# get current week and forward to that week
cur_week = CalendarWeek()
return HttpResponseRedirect(reverse('qsic:week', args=(cur_week.slug,)))
def week(request, week_slug):
"""
Show calendar for week. Events along with their corresponding
    performances and performances without events.
"""
cal_week = CalendarWeek(week_slug)
build_reoccuring_events(cal_week.start_dt)
# get all events for cal_week
events = [e for e in Event.objects.all().order_by('_start_dt')
if e.start_dt in cal_week and not e.is_placeholder]
# get all performances not in events
performances = Performance.objects.filter(
start_dt__gte=cal_week.start_dt,
start_dt__lt=cal_week.end_dt,
).exclude(
event__in=events
).order_by(
'start_dt'
)
events_and_perofrmances = list(itertools.chain(events, performances))
events_and_perofrmances.sort(key=lambda i: i.start_dt)
# for each day in ``cal_week``, add events and performances for that day
# in the order they take place.
days = []
for day in cal_week.days():
day_start = day['date']
day_end = day['date'] + timedelta(days=1)
day_events = []
while (events_and_perofrmances and
day_start <= events_and_perofrmances[0].start_dt < day_end):
day_events.append(events_and_perofrmances.pop(0))
days.append({'name': day['name'], 'date': day['date'], 'events': day_events})
previous_week = cal_week - 1
following_week = cal_week + 1
return render_to_response(
'events/week.html',
locals(),
context_instance=RequestContext(request)
)
def current_month(request):
# TODO
pass
def month(request, month_slug):
# TODO
pass
def event_detial_view_add_slug(request, pk=None):
e = get_object_or_404(Event, id=pk)
return HttpResponseRedirect(e.url)
def performance_detail_view_add_slug(request, pk=None):
p = get_object_or_404(Performance, id=pk)
return HttpResponseRedirect(p.url)
class EventDetailView(DetailView):
template_name = 'events/event_detail.html'
model = Event
class PerformanceDetailView(DetailView):
template_name = 'events/performance_detail.html'
model = Performance | logston/qsic3 | qsic/events/views.py | Python | bsd-3-clause | 4,823 |
# $Id: mod_sipp.py 5067 2015-04-13 12:28:02Z nanang $
## Automatic test module for SIPp.
##
## This module will need a test driver for each SIPp scenario:
## - For simple scenario, i.e: make/receive call (including auth), this
## test module can auto-generate a default test driver, i.e: make call
## or apply auto answer. Just name the SIPp scenario using "uas" or
## "uac" prefix accordingly.
## - Custom test driver can be defined in a python script file containing
## a list of the PJSUA instances and another list for PJSUA expects/
## commands. The custom test driver file must use the same filename as
## the SIPp XML scenario. See samples of SIPp scenario + its driver
## in tests/pjsua/scripts-sipp/ folder for detail.
##
## Here are defined macros that can be used in the custom driver:
## - $SIPP_PORT : SIPp binding port
## - $SIPP_URI : SIPp SIP URI
## - $PJSUA_PORT[N] : binding port of PJSUA instance #N
## - $PJSUA_URI[N] : SIP URI of PJSUA instance #N
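##
## A sketch of what a custom driver file might contain (values are
## illustrative, not taken from a real scenario; the entry format matches the
## parsing below -- PJSUA is a list of pjsua argument strings, PJSUA_EXPECTS
## is a list of [instance index, expected output, command to send] triples):
##
##   PJSUA = ["--null-audio --max-calls=1 --no-tcp"]
##   PJSUA_EXPECTS = [
##       [0, "", "m"],            # hypothetical: start making a call
##       [0, "", "$SIPP_URI"],    # hypothetical: dial the SIPp instance
##   ]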
import ctypes
import time
import imp
import sys
import os
import re
import subprocess
from inc_cfg import *
import inc_const
# flags that test is running in Unix
G_INUNIX = False
if sys.platform.lower().find("win32")!=-1 or sys.platform.lower().find("microsoft")!=-1:
G_INUNIX = False
else:
G_INUNIX = True
# /dev/null handle, for redirecting output when SIPP is not in background mode
FDEVNULL = None
# SIPp executable path and param
#SIPP_PATH = '"C:\\devs\\bin\\Sipp_3.2\\sipp.exe"'
SIPP_PATH = 'sipp'
SIPP_PORT = 6000
SIPP_PARAM = "-m 1 -i 127.0.0.1 -p " + str(SIPP_PORT)
SIPP_TIMEOUT = 60
# On BG mode, SIPp doesn't require special terminal
# On non-BG mode, on win, it needs env var: "TERMINFO=c:\cygwin\usr\share\terminfo"
# TODO: on unix with BG mode, waitpid() always fails, need to be fixed
SIPP_BG_MODE = False
#SIPP_BG_MODE = not G_INUNIX
# Will be updated based on the test driver file (a .py file whose the same name as SIPp XML file)
PJSUA_INST_PARAM = []
PJSUA_EXPECTS = []
# Default PJSUA param if test driver is not available:
# - no-tcp as SIPp is on UDP only
# - id, username, and realm: to allow PJSUA sending re-INVITE with auth after receiving 401/407 response
PJSUA_DEF_PARAM = "--null-audio --max-calls=1 --no-tcp --id=sip:a@localhost --username=a --realm=*"
# Get SIPp scenario (XML file)
SIPP_SCEN_XML = ""
if ARGS[1].endswith('.xml'):
SIPP_SCEN_XML = ARGS[1]
else:
exit(-99)
# Functions for resolving macros in the test driver
def resolve_pjsua_port(mo):
return str(PJSUA_INST_PARAM[int(mo.group(1))].sip_port)
def resolve_pjsua_uri(mo):
return PJSUA_INST_PARAM[int(mo.group(1))].uri[1:-1]
def resolve_driver_macros(st):
st = re.sub("\$SIPP_PORT", str(SIPP_PORT), st)
st = re.sub("\$SIPP_URI", "sip:[email protected]:"+str(SIPP_PORT), st)
st = re.sub("\$PJSUA_PORT\[(\d+)\]", resolve_pjsua_port, st)
st = re.sub("\$PJSUA_URI\[(\d+)\]", resolve_pjsua_uri, st)
return st
# Init test driver
if os.access(SIPP_SCEN_XML[:-4]+".py", os.R_OK):
# Load test driver file (the corresponding .py file), if any
cfg_file = imp.load_source("cfg_file", SIPP_SCEN_XML[:-4]+".py")
for ua_idx, ua_param in enumerate(cfg_file.PJSUA):
ua_param = resolve_driver_macros(ua_param)
PJSUA_INST_PARAM.append(InstanceParam("pjsua"+str(ua_idx), ua_param))
PJSUA_EXPECTS = cfg_file.PJSUA_EXPECTS
else:
# Generate default test driver
if os.path.basename(SIPP_SCEN_XML)[0:3] == "uas":
# auto make call when SIPp is as UAS
ua_param = PJSUA_DEF_PARAM + " sip:127.0.0.1:" + str(SIPP_PORT)
else:
# auto answer when SIPp is as UAC
ua_param = PJSUA_DEF_PARAM + " --auto-answer=200"
PJSUA_INST_PARAM.append(InstanceParam("pjsua", ua_param))
# Start SIPp process, returning PID
def start_sipp():
global SIPP_BG_MODE
sipp_proc = None
sipp_param = SIPP_PARAM + " -sf " + SIPP_SCEN_XML
if SIPP_BG_MODE:
sipp_param = sipp_param + " -bg"
if SIPP_TIMEOUT:
sipp_param = sipp_param + " -timeout "+str(SIPP_TIMEOUT)+"s -timeout_error" + " -deadcall_wait "+str(SIPP_TIMEOUT)+"s"
# add target param
sipp_param = sipp_param + " 127.0.0.1:" + str(PJSUA_INST_PARAM[0].sip_port)
# run SIPp
fullcmd = os.path.normpath(SIPP_PATH) + " " + sipp_param
print "Running SIPP: " + fullcmd
if SIPP_BG_MODE:
sipp_proc = subprocess.Popen(fullcmd, bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=G_INUNIX, universal_newlines=False)
else:
# redirect output to NULL
global FDEVNULL
#FDEVNULL = open(os.devnull, 'w')
FDEVNULL = open("logs/sipp_output.tmp", 'w')
sipp_proc = subprocess.Popen(fullcmd, shell=G_INUNIX, stdout=FDEVNULL, stderr=FDEVNULL)
if not SIPP_BG_MODE:
if sipp_proc == None or sipp_proc.poll():
return None
return sipp_proc
else:
# get SIPp child process PID
pid = 0
r = re.compile("PID=\[(\d+)\]", re.I)
while True:
line = sipp_proc.stdout.readline()
pid_r = r.search(line)
if pid_r:
pid = int(pid_r.group(1))
break
if not sipp_proc.poll():
break
if pid != 0:
# Win specific: get process handle from PID, as on win32, os.waitpid() takes process handle instead of pid
if (sys.platform == "win32"):
SYNCHRONIZE = 0x00100000
PROCESS_QUERY_INFORMATION = 0x0400
hnd = ctypes.windll.kernel32.OpenProcess(SYNCHRONIZE | PROCESS_QUERY_INFORMATION, False, pid)
pid = hnd
return pid
# Wait SIPp process to exit, returning SIPp exit code
def wait_sipp(sipp):
if not SIPP_BG_MODE:
global FDEVNULL
sipp.wait()
FDEVNULL.close()
return sipp.returncode
else:
print "Waiting SIPp (PID=" + str(sipp) + ") to exit.."
wait_cnt = 0
while True:
try:
wait_cnt = wait_cnt + 1
[pid_, ret_code] = os.waitpid(sipp, 0)
if sipp == pid_:
#print "SIPP returned ", ret_code
ret_code = ret_code >> 8
# Win specific: Close process handle
if (sys.platform == "win32"):
ctypes.windll.kernel32.CloseHandle(sipp)
return ret_code
except os.error:
if wait_cnt <= 5:
print "Retry ("+str(wait_cnt)+") waiting SIPp.."
else:
return -99
# Execute PJSUA flow
def exec_pjsua_expects(t, sipp):
# Get all PJSUA instances
ua = []
for ua_idx in range(len(PJSUA_INST_PARAM)):
ua.append(t.process[ua_idx])
ua_err_st = ""
while len(PJSUA_EXPECTS):
expect = PJSUA_EXPECTS.pop(0)
ua_idx = expect[0]
expect_st = expect[1]
send_cmd = resolve_driver_macros(expect[2])
# Handle exception in pjsua flow, to avoid zombie SIPp process
try:
if expect_st != "":
ua[ua_idx].expect(expect_st, raise_on_error = True)
if send_cmd != "":
ua[ua_idx].send(send_cmd)
except TestError, e:
ua_err_st = e.desc
break;
except:
ua_err_st = "Unknown error"
break;
# Need to poll here for handling these cases:
# - If there is no PJSUA EXPECT scenario, we must keep polling the stdout,
# otherwise PJSUA process may stuck (due to stdout pipe buffer full?).
# - last PJSUA_EXPECT contains a pjsua command that needs time to
# finish, for example "v" (re-INVITE), the SIPp XML scenario may expect
# that re-INVITE transaction to be completed and without stdout poll
# PJSUA process may stuck.
    # Ideally the poll should be done continuously until SIPp process is
# terminated.
# Update: now pjsua stdout is polled continuously by a dedicated thread,
# so the poll is no longer needed
#for ua_idx in range(len(ua)):
# ua[ua_idx].expect(inc_const.STDOUT_REFRESH, raise_on_error = False)
return ua_err_st
def sipp_err_to_str(err_code):
if err_code == 0:
return "All calls were successful"
elif err_code == 1:
return "At least one call failed"
elif err_code == 97:
return "exit on internal command. Calls may have been processed"
elif err_code == 99:
return "Normal exit without calls processed"
elif err_code == -1:
return "Fatal error (timeout)"
elif err_code == -2:
return "Fatal error binding a socket"
else:
return "Unknown error"
# Test body function
def TEST_FUNC(t):
sipp_ret_code = 0
ua_err_st = ""
sipp = start_sipp()
if not sipp:
raise TestError("Failed starting SIPp")
ua_err_st = exec_pjsua_expects(t, sipp)
sipp_ret_code = wait_sipp(sipp)
if ua_err_st != "":
raise TestError(ua_err_st)
if sipp_ret_code:
rc = ctypes.c_byte(sipp_ret_code).value
raise TestError("SIPp returned error " + str(rc) + ": " + sipp_err_to_str(rc))
# Here where it all comes together
test = TestParam(SIPP_SCEN_XML[:-4],
PJSUA_INST_PARAM,
TEST_FUNC)
| lxki/pjsip | tests/pjsua/mod_sipp.py | Python | gpl-2.0 | 8,609 |
#!/usr/bin/env python3
"""
The default output from glimmerHMM (when using the -g option) produces something LIKE
GFF but has only mRNA and CDS features. This script transforms this into the canonical
genes described in the specification.
Example input:
Supercontig_3.1 GlimmerHMM mRNA 15493 15926 . + . ID=Supercontig_3.1.path1.gene7;Name=Supercontig_3.1.path1.gene7
Supercontig_3.1 GlimmerHMM CDS 15493 15562 . + 0 ID=Supercontig_3.1.cds7.1;Parent=Supercontig_3.1.path1.gene7;Name=Supercontig_3.1.path1.gene7;Note=initial-exon
Supercontig_3.1 GlimmerHMM CDS 15853 15926 . + 2 ID=Supercontig_3.1.cds7.2;Parent=Supercontig_3.1.path1.gene7;Name=Supercontig_3.1.path1.gene7;Note=final-exon
Example output:
Supercontig_3.1 GlimmerHMM gene 15493 15926 . + . ID=Supercontig_3.1.path1.gene7;Name=Supercontig_3.1.path1.gene7
Supercontig_3.1 GlimmerHMM mRNA 15493 15926 . + . ID=Supercontig_3.1.path1.gene7.mRNA;Name=Supercontig_3.1.path1.gene7.mRNA
Supercontig_3.1 GlimmerHMM CDS 15493 15562 . + 0 ID=Supercontig_3.1.path1.gene7.cds;Parent=Supercontig_3.1.path1.gene7.mRNA;Name=Supercontig_3.1.path1.gene7;Note=initial-exon
Supercontig_3.1 GlimmerHMM exon 15493 15562 . + 0 ID=Supercontig_3.1.path1.gene7.exon.1;Parent=Supercontig_3.1.path1.gene7.mRNA;Name=Supercontig_3.1.path1.gene7;Note=initial-exon
Supercontig_3.1 GlimmerHMM CDS 15853 15926 . + 2 ID=Supercontig_3.1.path1.gene7.cds;Parent=Supercontig_3.1.path1.gene7.mRNA;Name=Supercontig_3.1.path1.gene7;Note=final-exon
Supercontig_3.1 GlimmerHMM exon 15853 15926 . + 2 ID=Supercontig_3.1.path1.gene7.exon.2;Parent=Supercontig_3.1.path1.gene7.mRNA;Name=Supercontig_3.1.path1.gene7;Note=final-exon
Author: Joshua Orvis
Contact: jorvis AT gmail
"""
import argparse
import os
import biocodegff
from collections import defaultdict
def main():
parser = argparse.ArgumentParser( description='Converts glimmerHMM GFF output to GFF3')
# output file to be written
parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input file to parse' )
parser.add_argument('-o', '--output_file', type=str, required=True, help='Path to an output file to be created' )
args = parser.parse_args()
fout = open(args.output_file, 'w')
current_gene = None
current_mRNA = None
next_exon_num = defaultdict(int)
for line in open(args.input_file, 'r'):
if line.startswith('#'):
fout.write(line)
continue
line = line.rstrip()
cols = line.split("\t")
if len(cols) != 9:
continue
mol_id = cols[0]
feat_type = cols[2]
feat_fmin = int(cols[3]) - 1
feat_fmax = int(cols[4])
id = biocodegff.column_9_value(cols[8], 'ID')
parent = biocodegff.column_9_value(cols[8], 'Parent')
if feat_type == 'mRNA':
gene_cols = list(cols)
gene_cols[2] = 'gene'
cols[8] = biocodegff.set_column_9_value( cols[8], 'ID', "{0}.mRNA".format(id) )
cols[8] = biocodegff.set_column_9_value( cols[8], 'Name', "{0}.mRNA".format(id) )
cols[8] = biocodegff.order_column_9(cols[8])
# print the gene and mRNA
fout.write( "{0}\n".format("\t".join(gene_cols)) )
fout.write( "{0}\n".format("\t".join(cols)) )
elif feat_type == 'CDS':
exon_cols = list(cols)
cols[8] = biocodegff.set_column_9_value( cols[8], 'ID', "{0}.cds".format(parent) )
cols[8] = biocodegff.set_column_9_value( cols[8], 'Name', "{0}.cds".format(parent) )
cols[8] = biocodegff.set_column_9_value( cols[8], 'Parent', "{0}.mRNA".format(parent) )
cols[8] = biocodegff.order_column_9(cols[8])
exon_id = "{0}.exon.{1}".format(parent, next_exon_num[parent] )
next_exon_num[parent] += 1
exon_cols[2] = 'exon'
exon_cols[7] = '.'
exon_cols[8] = biocodegff.set_column_9_value( exon_cols[8], 'ID', exon_id )
exon_cols[8] = biocodegff.set_column_9_value( exon_cols[8], 'Name', exon_id )
exon_cols[8] = biocodegff.set_column_9_value( exon_cols[8], 'Parent', "{0}.mRNA".format(parent) )
exon_cols[8] = biocodegff.order_column_9(exon_cols[8])
fout.write( "{0}\n".format("\t".join(exon_cols)) )
fout.write( "{0}\n".format("\t".join(cols)) )
if __name__ == '__main__':
main()
| jonathancrabtree/biocode | gff/convert_glimmerHMM_gff_to_gff3.py | Python | gpl-3.0 | 4,769 |
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import urllib
import sys
import codecs
from PyQt4.QtCore import Qt, SIGNAL
from PyQt4.QtGui import QFrame, QImage, QPixmap, QFileDialog
from weboob.applications.qcookboob.ui.recipe_ui import Ui_Recipe
from weboob.capabilities.base import empty
class Recipe(QFrame):
def __init__(self, recipe, backend, parent=None):
QFrame.__init__(self, parent)
self.parent = parent
self.ui = Ui_Recipe()
self.ui.setupUi(self)
self.connect(self.ui.exportButton, SIGNAL("clicked()"), self.export)
self.recipe = recipe
self.backend = backend
self.gotThumbnail()
self.ui.idEdit.setText(u'%s@%s' % (recipe.id, backend.name))
if not empty(recipe.title):
self.ui.titleLabel.setText(recipe.title)
if not empty(recipe.nb_person):
nbstr = '-'.join(str(num) for num in recipe.nb_person)
self.ui.nbPersonLabel.setText(nbstr)
else:
self.ui.nbPersonLabel.parent().hide()
if not empty(recipe.preparation_time):
self.ui.prepTimeLabel.setText('%s min' % recipe.preparation_time)
else:
self.ui.prepTimeLabel.parent().hide()
if not empty(recipe.cooking_time):
self.ui.cookingTimeLabel.setText('%s min' % recipe.cooking_time)
else:
self.ui.cookingTimeLabel.parent().hide()
if not empty(recipe.ingredients):
txt = u''
for ing in recipe.ingredients:
txt += '* %s\n' % ing
self.ui.ingredientsPlain.setPlainText('%s' % txt)
else:
self.ui.ingredientsPlain.parent().hide()
if not empty(recipe.author):
self.ui.authorLabel.setText('%s' % recipe.author)
else:
self.ui.authorLabel.parent().hide()
if not empty(recipe.instructions):
self.ui.instructionsPlain.setPlainText('%s' % recipe.instructions)
else:
self.ui.instructionsPlain.parent().hide()
if not empty(recipe.comments):
txt = u''
for com in recipe.comments:
txt += '* %s\n' % com
self.ui.commentsPlain.setPlainText('%s' % txt)
else:
self.ui.commentsPlain.parent().hide()
self.ui.verticalLayout.setAlignment(Qt.AlignTop)
self.ui.verticalLayout_2.setAlignment(Qt.AlignTop)
def gotThumbnail(self):
if not empty(self.recipe.picture_url):
data = urllib.urlopen(self.recipe.picture_url).read()
img = QImage.fromData(data)
self.ui.imageLabel.setPixmap(QPixmap.fromImage(img).scaledToWidth(250, Qt.SmoothTransformation))
def export(self):
fileDial = QFileDialog(self, 'Export "%s" recipe' %
self.recipe.title, '%s.kreml' % self.recipe.title.replace('/', ','), 'Krecipe file (*.kreml);;all files (*)')
fileDial.setAcceptMode(QFileDialog.AcceptSave)
fileDial.setLabelText(QFileDialog.Accept, 'Export recipe')
fileDial.setLabelText(QFileDialog.FileName, 'Recipe file name')
ok = (fileDial.exec_() == 1)
if not ok:
return
result = fileDial.selectedFiles()
if len(result) > 0:
dest = unicode(result[0])
if not dest.endswith('.kreml'):
dest += '.kreml'
data = self.recipe.toKrecipesXml(author=self.backend.name)
try:
with codecs.open(dest, 'w', 'utf-8') as f:
f.write(data)
except IOError as e:
print >>sys.stderr, 'Unable to write Krecipe file in "%s": %s' % (dest, e)
return 1
return
| yannrouillard/weboob | weboob/applications/qcookboob/recipe.py | Python | agpl-3.0 | 4,427 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Inventory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('transaction_date', models.DateTimeField(verbose_name=b'Date of Transaction')),
('transaction_type', models.IntegerField(default=1, verbose_name=b'Type of transaction', choices=[(2, b'Subtract from Inventory'), (1, b'Add to Inventory')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LibraryBook',
fields=[
('isbn_13', models.CharField(primary_key=True, serialize=False, max_length=13, unique=True, verbose_name=b'ISBN-13', db_index=True)),
('isbn_10', models.CharField(max_length=10, verbose_name=b'ISBN-10', blank=True)),
('title', models.CharField(max_length=250, verbose_name=b'Title of book', db_index=True)),
                ('author', models.TextField(max_length=1000, verbose_name=b'Comma-separated list of authors.')),
('publisher', models.CharField(max_length=200, verbose_name=b'Name of imprint or publisher.')),
('publish_date', models.PositiveIntegerField(null=True, verbose_name=b'Publication date')),
('description', models.TextField(max_length=2000, verbose_name=b'Summary of book', blank=True)),
('genre', models.CharField(max_length=500, verbose_name=b'Genre', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Store',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=100, verbose_name=b'Name of Store')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserToStore',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('permission', models.CharField(default=b'rw', max_length=15, verbose_name=b'Kind of access granted to user.', choices=[(b'r', b'Read'), (b'w', b'Write'), (b'rw', b'Read-Write')])),
('store', models.ForeignKey(to='bookservices.Store')),
('user', models.ForeignKey(related_name='storerel', to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='store',
name='allowedUsers',
field=models.ManyToManyField(related_name='store', through='bookservices.UserToStore', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='inventory',
name='book',
field=models.ForeignKey(to='bookservices.LibraryBook'),
preserve_default=True,
),
migrations.AddField(
model_name='inventory',
name='store',
field=models.ForeignKey(to='bookservices.Store'),
preserve_default=True,
),
]
| sydneycarton/NeatMechanicalToy | app/bookbeeper/bookservices/migrations/0001_initial.py | Python | apache-2.0 | 3,684 |
"""
VerseBot for Reddit
By Matthieu Grieger
Continued By Team VerseBot
response.py
Copyright (c) 2015 Matthieu Grieger (MIT License)
"""
MAXIMUM_MESSAGE_LENGTH = 4000
class Response:
""" Class that holds the properties and methods of a comment
response. """
def __init__(self, message, parser, link=None):
""" Initializes a Response object. """
self.verse_list = list()
self.message = message
self.parser = parser
self.response = ""
if link is not None:
self.link = link
else:
self.link = ''
def add_verse(self, verse):
""" Adds a verse to the verse list.
:param verse: Verse to add to the list of verses
"""
self.verse_list.append(verse)
def is_duplicate_verse(self, verse):
""" Checks the incoming verse against the verse list to make sure
it is not a duplicate.
:param verse: Verse to check duplicates for
"""
for v in self.verse_list:
if (v.book == verse.book and
v.chapter == verse.chapter and
v.verse == verse.verse and
v.translation == verse.translation):
return True
return False
def construct_message(self):
""" Constructs a message response. """
for verse in self.verse_list:
verse.get_contents()
if verse.contents is not None:
if verse.verse is not None:
self.response += ("[**%s %d:%s | %s**](%s)\n\n>"
% (verse.book, verse.chapter,
verse.verse, verse.translation_title,
verse.permalink))
else:
self.response += ("[**%s %d | %s**](%s)\n\n>"
% (verse.book, verse.chapter,
verse.translation_title,
verse.permalink))
self.response += verse.contents
self.response += "\n\n"
if self.response == "":
return None
else:
if self.exceeds_max_length():
self.response = self.generate_overflow_response()
# self.response += self.get_comment_footer()
return self.response
def exceeds_max_length(self):
""" Returns true if the current response exceeds the maximum comment
length, returns false otherwise. """
return len(self.response) > MAXIMUM_MESSAGE_LENGTH
def generate_overflow_response(self):
""" Constructs and generates an overflow comment whenever the comment
exceeds the character limit set by MAXIMUM_MESSAGE_LENGTH. Instead of
posting the contents of the verse(s) in the comment, it links to
webpages that contain the contents of the verse(s). """
comment = ("The contents of the verse(s) you quoted exceed the %d "
"character limit. Instead, here are links to the "
"verse(s)!\n\n" % MAXIMUM_MESSAGE_LENGTH)
for verse in self.verse_list:
if verse.translation == "JPS":
overflow_link = verse.permalink
else:
if verse.verse is not None:
overflow_link = ("https://www.biblegateway.com/passage/"
"?search=%s+%s:%s&version=%s"
% (verse.book, verse.chapter, verse.verse,
verse.translation))
else:
overflow_link = verse.permalink
if verse.verse is not None:
comment += ("- [%s %d:%s (%s)](%s)\n\n"
% (verse.book, verse.chapter, verse.verse,
verse.translation, overflow_link))
else:
comment += ("- [%s %d (%s)](%s)\n\n"
% (verse.book, verse.chapter, verse.translation,
overflow_link))
return comment
'''
def get_comment_footer(self):
""" Returns the footer for the comment. """
return ("\n***\n[^Code](https://github.com/Team-VerseBot/versebot) ^|"
" ^/r/VerseBot ^| [^Contact ^Devs](https://github.com/"
"Team-VerseBot/versebot/issues) ^|"
" [^Usage](https://github.com/Team-VerseBot/versebot/blob/"
"master/README.md) ^|"
" [^Changelog](https://github.com/Team-VerseBot/versebot/blob/"
"master/CHANGELOG.md) ^|"
" [^Stats](http://adamgrieger.com/versebot/) ^|"
" [^Set ^a ^Default ^Translation](http://adamgrieger.com/"
"versebot#defaults) \n\n"
"^All ^texts ^provided ^by [^BibleGateway]"
"(http://biblegateway.com) ^and [^Bible ^Hub]"
"(http://biblehub.com)^. \n\n"
" ^Mistake? ^%(user)s ^can [^edit](/message/compose/"
"?to=%(bot)s&subject=edit+request&message={%(link)s} "
"Please+enter+your+revised+verse+quotations+below+in+the+usual"
"+bracketed+syntax.)"
" ^or [^delete](/message/compose/?to=%(bot)s&subject=delete"
"+request&message={%(link)s} "
"This+action+cannot+be+reversed!) ^this ^comment."
% {"user": self.message.author, "bot": REDDIT_USERNAME,
"link": self.link})
'''
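# Typical flow (sketch; `message` and `parser` come from the bot's reddit
# handling code, `verse` from the parser):
#   resp = Response(message, parser)
#   resp.add_verse(verse)
#   comment_body = resp.construct_message()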
| Matthew-Arnold/slack-versebot | versebot/response.py | Python | mit | 5,650 |
# Copyright (c) 2007 Red Hat, Inc.
# Copyright (c) 2009, 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys, re
import shutil
import subprocess
import string
import pykickstart.sections as kssections
import pykickstart.commands as kscommands
import pykickstart.constants as ksconstants
import pykickstart.errors as kserrors
import pykickstart.parser as ksparser
import pykickstart.version as ksversion
from pykickstart.handlers.control import commandMap
from pykickstart.handlers.control import dataMap
from mic import msger
from mic.utils import errors, misc, runner, fs_related as fs
from custom_commands import desktop, micrepo, micboot, partition
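# Matches scheme://user[:password]@rest so credentials can be stripped from a
# baseurl before it is written into a generated repo file (see
# MoblinRepoConfig below).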
AUTH_URL_PTN = r"(?P<scheme>.*)://(?P<username>.*)(:(?P<password>.*))?@(?P<url>.*)"
class PrepackageSection(kssections.Section):
sectionOpen = "%prepackages"
def handleLine(self, line):
if not self.handler:
return
(h, s, t) = line.partition('#')
line = h.rstrip()
self.handler.prepackages.add([line])
def handleHeader(self, lineno, args):
kssections.Section.handleHeader(self, lineno, args)
class AttachmentSection(kssections.Section):
sectionOpen = "%attachment"
def handleLine(self, line):
if not self.handler:
return
(h, s, t) = line.partition('#')
line = h.rstrip()
self.handler.attachment.add([line])
def handleHeader(self, lineno, args):
kssections.Section.handleHeader(self, lineno, args)
def apply_wrapper(func):
def wrapper(*kargs, **kwargs):
try:
func(*kargs, **kwargs)
except (OSError, IOError, errors.KsError), err:
cfgcls = kargs[0].__class__.__name__
if msger.ask("Failed to apply %s, skip and continue?" % cfgcls):
msger.warning("%s" % err)
pass
else:
# just throw out the exception
raise
return wrapper
def read_kickstart(path):
"""Parse a kickstart file and return a KickstartParser instance.
This is a simple utility function which takes a path to a kickstart file,
parses it and returns a pykickstart KickstartParser instance which can
be then passed to an ImageCreator constructor.
If an error occurs, a CreatorError exception is thrown.
"""
#version = ksversion.makeVersion()
#ks = ksparser.KickstartParser(version)
using_version = ksversion.DEVEL
commandMap[using_version]["desktop"] = desktop.Mic_Desktop
commandMap[using_version]["repo"] = micrepo.Mic_Repo
commandMap[using_version]["bootloader"] = micboot.Mic_Bootloader
commandMap[using_version]["part"] = partition.Mic_Partition
commandMap[using_version]["partition"] = partition.Mic_Partition
dataMap[using_version]["RepoData"] = micrepo.Mic_RepoData
dataMap[using_version]["PartData"] = partition.Mic_PartData
superclass = ksversion.returnClassForVersion(version=using_version)
class KSHandlers(superclass):
def __init__(self):
superclass.__init__(self, mapping=commandMap[using_version])
self.prepackages = ksparser.Packages()
self.attachment = ksparser.Packages()
ks = ksparser.KickstartParser(KSHandlers(), errorsAreFatal=False)
ks.registerSection(PrepackageSection(ks.handler))
ks.registerSection(AttachmentSection(ks.handler))
try:
ks.readKickstart(path)
except (kserrors.KickstartParseError, kserrors.KickstartError), err:
if msger.ask("Errors occured on kickstart file, skip and continue?"):
msger.warning("%s" % err)
pass
else:
raise errors.KsError("%s" % err)
return ks
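# Usage sketch (the path is illustrative):
#   ks = read_kickstart("/path/to/image.ks")
#   packages = get_packages(ks)
#   repos = get_repos(ks)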
class KickstartConfig(object):
"""A base class for applying kickstart configurations to a system."""
def __init__(self, instroot):
self.instroot = instroot
def path(self, subpath):
return self.instroot + subpath
def _check_sysconfig(self):
if not os.path.exists(self.path("/etc/sysconfig")):
fs.makedirs(self.path("/etc/sysconfig"))
def chroot(self):
os.chroot(self.instroot)
os.chdir("/")
def call(self, args):
if not os.path.exists("%s/%s" %(self.instroot, args[0])):
raise errors.KsError("Can't find %s in chroot" % args[0])
subprocess.call(args, preexec_fn = self.chroot)
def apply(self):
pass
class LanguageConfig(KickstartConfig):
"""A class to apply a kickstart language configuration to a system."""
@apply_wrapper
def apply(self, kslang):
self._check_sysconfig()
if kslang.lang:
f = open(self.path("/etc/sysconfig/i18n"), "w+")
f.write("LANG=\"" + kslang.lang + "\"\n")
f.close()
class KeyboardConfig(KickstartConfig):
"""A class to apply a kickstart keyboard configuration to a system."""
@apply_wrapper
def apply(self, kskeyboard):
#
# FIXME:
# should this impact the X keyboard config too?
# or do we want to make X be able to do this mapping?
#
#k = rhpl.keyboard.Keyboard()
#if kskeyboard.keyboard:
# k.set(kskeyboard.keyboard)
#k.write(self.instroot)
pass
class TimezoneConfig(KickstartConfig):
"""A class to apply a kickstart timezone configuration to a system."""
@apply_wrapper
def apply(self, kstimezone):
self._check_sysconfig()
tz = kstimezone.timezone or "America/New_York"
utc = str(kstimezone.isUtc)
f = open(self.path("/etc/sysconfig/clock"), "w+")
f.write("ZONE=\"" + tz + "\"\n")
f.write("UTC=" + utc + "\n")
f.close()
tz_source = "/usr/share/zoneinfo/%s" % (tz)
tz_dest = "/etc/localtime"
try:
cpcmd = fs.find_binary_inchroot('cp', self.instroot)
if cpcmd:
self.call([cpcmd, "-f", tz_source, tz_dest])
else:
cpcmd = fs.find_binary_path('cp')
subprocess.call([cpcmd, "-f",
self.path(tz_source),
self.path(tz_dest)])
except (IOError, OSError), (errno, msg):
raise errors.KsError("Timezone setting error: %s" % msg)
class AuthConfig(KickstartConfig):
"""A class to apply a kickstart authconfig configuration to a system."""
@apply_wrapper
def apply(self, ksauthconfig):
auth = ksauthconfig.authconfig or "--useshadow --enablemd5"
args = ["/usr/share/authconfig/authconfig.py", "--update", "--nostart"]
self.call(args + auth.split())
class FirewallConfig(KickstartConfig):
"""A class to apply a kickstart firewall configuration to a system."""
@apply_wrapper
def apply(self, ksfirewall):
#
# FIXME: should handle the rest of the options
#
if not os.path.exists(self.path("/usr/sbin/lokkit")):
return
if ksfirewall.enabled:
status = "--enabled"
else:
status = "--disabled"
self.call(["/usr/sbin/lokkit",
"-f", "--quiet", "--nostart", status])
class RootPasswordConfig(KickstartConfig):
"""A class to apply a kickstart root password configuration to a system."""
def unset(self):
self.call(["/usr/bin/passwd", "-d", "root"])
def set_encrypted(self, password):
self.call(["/usr/sbin/usermod", "-p", password, "root"])
def set_unencrypted(self, password):
for p in ("/bin/echo", "/usr/sbin/chpasswd"):
if not os.path.exists("%s/%s" %(self.instroot, p)):
raise errors.KsError("Unable to set unencrypted password due "
"to lack of %s" % p)
p1 = subprocess.Popen(["/bin/echo", "root:%s" %password],
stdout = subprocess.PIPE,
preexec_fn = self.chroot)
p2 = subprocess.Popen(["/usr/sbin/chpasswd", "-m"],
stdin = p1.stdout,
stdout = subprocess.PIPE,
preexec_fn = self.chroot)
p2.communicate()
@apply_wrapper
def apply(self, ksrootpw):
if ksrootpw.isCrypted:
self.set_encrypted(ksrootpw.password)
elif ksrootpw.password != "":
self.set_unencrypted(ksrootpw.password)
else:
self.unset()
class UserConfig(KickstartConfig):
def set_empty_passwd(self, user):
self.call(["/usr/bin/passwd", "-d", user])
def set_encrypted_passwd(self, user, password):
self.call(["/usr/sbin/usermod", "-p", "%s" % password, user])
def set_unencrypted_passwd(self, user, password):
for p in ("/bin/echo", "/usr/sbin/chpasswd"):
if not os.path.exists("%s/%s" %(self.instroot, p)):
raise errors.KsError("Unable to set unencrypted password due "
"to lack of %s" % p)
p1 = subprocess.Popen(["/bin/echo", "%s:%s" %(user, password)],
stdout = subprocess.PIPE,
preexec_fn = self.chroot)
p2 = subprocess.Popen(["/usr/sbin/chpasswd", "-m"],
stdin = p1.stdout,
stdout = subprocess.PIPE,
preexec_fn = self.chroot)
p2.communicate()
def addUser(self, userconfig):
args = [ "/usr/sbin/useradd" ]
if userconfig.groups:
args += [ "--groups", string.join(userconfig.groups, ",") ]
if userconfig.name:
args += [ "-m"]
args += [ "-d", "/home/%s" % userconfig.name ]
args.append(userconfig.name)
try:
dev_null = os.open("/dev/null", os.O_WRONLY)
msger.debug('adding user with %s' % args)
subprocess.call(args,
stdout = dev_null,
stderr = dev_null,
preexec_fn = self.chroot)
os.close(dev_null)
except:
msger.warning('Cannot add user using "useradd"')
if userconfig.password not in (None, ""):
if userconfig.isCrypted:
self.set_encrypted_passwd(userconfig.name,
userconfig.password)
else:
self.set_unencrypted_passwd(userconfig.name,
userconfig.password)
else:
self.set_empty_passwd(userconfig.name)
else:
raise errors.KsError("Invalid kickstart command: %s" \
% userconfig.__str__())
@apply_wrapper
def apply(self, user):
for userconfig in user.userList:
self.addUser(userconfig)
class ServicesConfig(KickstartConfig):
"""A class to apply a kickstart services configuration to a system."""
@apply_wrapper
def apply(self, ksservices):
if not os.path.exists(self.path("/sbin/chkconfig")):
return
for s in ksservices.enabled:
self.call(["/sbin/chkconfig", s, "on"])
for s in ksservices.disabled:
self.call(["/sbin/chkconfig", s, "off"])
class XConfig(KickstartConfig):
"""A class to apply a kickstart X configuration to a system."""
@apply_wrapper
def apply(self, ksxconfig):
if ksxconfig.startX and os.path.exists(self.path("/etc/inittab")):
f = open(self.path("/etc/inittab"), "rw+")
buf = f.read()
buf = buf.replace("id:3:initdefault", "id:5:initdefault")
f.seek(0)
f.write(buf)
f.close()
if ksxconfig.defaultdesktop:
self._check_sysconfig()
f = open(self.path("/etc/sysconfig/desktop"), "w")
f.write("DESKTOP="+ksxconfig.defaultdesktop+"\n")
f.close()
class DesktopConfig(KickstartConfig):
"""A class to apply a kickstart desktop configuration to a system."""
@apply_wrapper
def apply(self, ksdesktop):
if ksdesktop.defaultdesktop:
self._check_sysconfig()
f = open(self.path("/etc/sysconfig/desktop"), "w")
f.write("DESKTOP="+ksdesktop.defaultdesktop+"\n")
f.close()
if os.path.exists(self.path("/etc/gdm/custom.conf")):
f = open(self.path("/etc/skel/.dmrc"), "w")
f.write("[Desktop]\n")
f.write("Session="+ksdesktop.defaultdesktop.lower()+"\n")
f.close()
if ksdesktop.session:
if os.path.exists(self.path("/etc/sysconfig/uxlaunch")):
f = open(self.path("/etc/sysconfig/uxlaunch"), "a+")
f.write("session="+ksdesktop.session.lower()+"\n")
f.close()
if ksdesktop.autologinuser:
self._check_sysconfig()
f = open(self.path("/etc/sysconfig/desktop"), "a+")
f.write("AUTOLOGIN_USER=" + ksdesktop.autologinuser + "\n")
f.close()
if os.path.exists(self.path("/etc/gdm/custom.conf")):
f = open(self.path("/etc/gdm/custom.conf"), "w")
f.write("[daemon]\n")
f.write("AutomaticLoginEnable=true\n")
f.write("AutomaticLogin=" + ksdesktop.autologinuser + "\n")
f.close()
class MoblinRepoConfig(KickstartConfig):
"""A class to apply a kickstart desktop configuration to a system."""
def __create_repo_section(self, repo, type, fd):
baseurl = None
mirrorlist = None
reposuffix = {"base":"", "debuginfo":"-debuginfo", "source":"-source"}
reponame = repo.name + reposuffix[type]
if type == "base":
if repo.baseurl:
baseurl = repo.baseurl
if repo.mirrorlist:
mirrorlist = repo.mirrorlist
elif type == "debuginfo":
if repo.baseurl:
if repo.baseurl.endswith("/"):
baseurl = os.path.dirname(os.path.dirname(repo.baseurl))
else:
baseurl = os.path.dirname(repo.baseurl)
baseurl += "/debug"
if repo.mirrorlist:
variant = repo.mirrorlist[repo.mirrorlist.find("$"):]
mirrorlist = repo.mirrorlist[0:repo.mirrorlist.find("$")]
mirrorlist += "debug" + "-" + variant
elif type == "source":
if repo.baseurl:
if repo.baseurl.endswith("/"):
baseurl = os.path.dirname(
os.path.dirname(
os.path.dirname(repo.baseurl)))
else:
baseurl = os.path.dirname(os.path.dirname(repo.baseurl))
baseurl += "/source"
if repo.mirrorlist:
variant = repo.mirrorlist[repo.mirrorlist.find("$"):]
mirrorlist = repo.mirrorlist[0:repo.mirrorlist.find("$")]
mirrorlist += "source" + "-" + variant
fd.write("[" + reponame + "]\n")
fd.write("name=" + reponame + "\n")
fd.write("failovermethod=priority\n")
if baseurl:
auth_url = re.compile(AUTH_URL_PTN)
m = auth_url.match(baseurl)
if m:
baseurl = "%s://%s" % (m.group('scheme'), m.group('url'))
fd.write("baseurl=" + baseurl + "\n")
if mirrorlist:
fd.write("mirrorlist=" + mirrorlist + "\n")
""" Skip saving proxy settings """
#if repo.proxy:
# fd.write("proxy=" + repo.proxy + "\n")
#if repo.proxy_username:
# fd.write("proxy_username=" + repo.proxy_username + "\n")
#if repo.proxy_password:
# fd.write("proxy_password=" + repo.proxy_password + "\n")
if repo.gpgkey:
fd.write("gpgkey=" + repo.gpgkey + "\n")
fd.write("gpgcheck=1\n")
else:
fd.write("gpgcheck=0\n")
if type == "source" or type == "debuginfo" or repo.disable:
fd.write("enabled=0\n")
else:
fd.write("enabled=1\n")
fd.write("\n")
def __create_repo_file(self, repo, repodir):
fs.makedirs(self.path(repodir))
f = open(self.path(repodir + "/" + repo.name + ".repo"), "w")
self.__create_repo_section(repo, "base", f)
if repo.debuginfo:
self.__create_repo_section(repo, "debuginfo", f)
if repo.source:
self.__create_repo_section(repo, "source", f)
f.close()
@apply_wrapper
def apply(self, ksrepo, repodata, repourl):
for repo in ksrepo.repoList:
if repo.name in repourl:
repo.baseurl = repourl[repo.name]
if repo.save:
#self.__create_repo_file(repo, "/etc/yum.repos.d")
self.__create_repo_file(repo, "/etc/zypp/repos.d")
""" Import repo gpg keys """
if repodata:
for repo in repodata:
if repo['repokey']:
runner.quiet(['rpm',
"--root=%s" % self.instroot,
"--import",
repo['repokey']])
class RPMMacroConfig(KickstartConfig):
"""A class to apply the specified rpm macros to the filesystem"""
@apply_wrapper
def apply(self, ks):
if not ks:
return
if not os.path.exists(self.path("/etc/rpm")):
os.mkdir(self.path("/etc/rpm"))
f = open(self.path("/etc/rpm/macros.imgcreate"), "w+")
if exclude_docs(ks):
f.write("%_excludedocs 1\n")
f.write("%__file_context_path %{nil}\n")
if inst_langs(ks) != None:
f.write("%_install_langs ")
f.write(inst_langs(ks))
f.write("\n")
f.close()
class NetworkConfig(KickstartConfig):
"""A class to apply a kickstart network configuration to a system."""
def write_ifcfg(self, network):
p = self.path("/etc/sysconfig/network-scripts/ifcfg-" + network.device)
f = file(p, "w+")
os.chmod(p, 0644)
f.write("DEVICE=%s\n" % network.device)
f.write("BOOTPROTO=%s\n" % network.bootProto)
if network.bootProto.lower() == "static":
if network.ip:
f.write("IPADDR=%s\n" % network.ip)
if network.netmask:
f.write("NETMASK=%s\n" % network.netmask)
if network.onboot:
f.write("ONBOOT=on\n")
else:
f.write("ONBOOT=off\n")
if network.essid:
f.write("ESSID=%s\n" % network.essid)
if network.ethtool:
if network.ethtool.find("autoneg") == -1:
network.ethtool = "autoneg off " + network.ethtool
f.write("ETHTOOL_OPTS=%s\n" % network.ethtool)
if network.bootProto.lower() == "dhcp":
if network.hostname:
f.write("DHCP_HOSTNAME=%s\n" % network.hostname)
if network.dhcpclass:
f.write("DHCP_CLASSID=%s\n" % network.dhcpclass)
if network.mtu:
f.write("MTU=%s\n" % network.mtu)
f.close()
def write_wepkey(self, network):
if not network.wepkey:
return
p = self.path("/etc/sysconfig/network-scripts/keys-" + network.device)
f = file(p, "w+")
os.chmod(p, 0600)
f.write("KEY=%s\n" % network.wepkey)
f.close()
def write_sysconfig(self, useipv6, hostname, gateway):
path = self.path("/etc/sysconfig/network")
f = file(path, "w+")
os.chmod(path, 0644)
f.write("NETWORKING=yes\n")
if useipv6:
f.write("NETWORKING_IPV6=yes\n")
else:
f.write("NETWORKING_IPV6=no\n")
if hostname:
f.write("HOSTNAME=%s\n" % hostname)
else:
f.write("HOSTNAME=localhost.localdomain\n")
if gateway:
f.write("GATEWAY=%s\n" % gateway)
f.close()
def write_hosts(self, hostname):
localline = ""
if hostname and hostname != "localhost.localdomain":
localline += hostname + " "
l = hostname.split(".")
if len(l) > 1:
localline += l[0] + " "
localline += "localhost.localdomain localhost"
path = self.path("/etc/hosts")
f = file(path, "w+")
os.chmod(path, 0644)
f.write("127.0.0.1\t\t%s\n" % localline)
f.write("::1\t\tlocalhost6.localdomain6 localhost6\n")
f.close()
def write_resolv(self, nodns, nameservers):
if nodns or not nameservers:
return
path = self.path("/etc/resolv.conf")
f = file(path, "w+")
os.chmod(path, 0644)
for ns in (nameservers):
if ns:
f.write("nameserver %s\n" % ns)
f.close()
@apply_wrapper
def apply(self, ksnet):
fs.makedirs(self.path("/etc/sysconfig/network-scripts"))
useipv6 = False
nodns = False
hostname = None
gateway = None
nameservers = None
for network in ksnet.network:
if not network.device:
raise errors.KsError("No --device specified with "
"network kickstart command")
if (network.onboot and network.bootProto.lower() != "dhcp" and
not (network.ip and network.netmask)):
raise errors.KsError("No IP address and/or netmask "
"specified with static "
"configuration for '%s'" %
network.device)
self.write_ifcfg(network)
self.write_wepkey(network)
if network.ipv6:
useipv6 = True
if network.nodns:
nodns = True
if network.hostname:
hostname = network.hostname
if network.gateway:
gateway = network.gateway
if network.nameserver:
nameservers = network.nameserver.split(",")
self.write_sysconfig(useipv6, hostname, gateway)
self.write_hosts(hostname)
self.write_resolv(nodns, nameservers)
def get_image_size(ks, default = None):
__size = 0
for p in ks.handler.partition.partitions:
if p.mountpoint == "/" and p.size:
__size = p.size
if __size > 0:
return int(__size) * 1024L * 1024L
else:
return default
def get_image_fstype(ks, default = None):
for p in ks.handler.partition.partitions:
if p.mountpoint == "/" and p.fstype:
return p.fstype
return default
def get_image_fsopts(ks, default = None):
for p in ks.handler.partition.partitions:
if p.mountpoint == "/" and p.fsopts:
return p.fsopts
return default
def get_modules(ks):
devices = []
if isinstance(ks.handler.device, kscommands.device.FC3_Device):
devices.append(ks.handler.device)
else:
devices.extend(ks.handler.device.deviceList)
modules = []
for device in devices:
if not device.moduleName:
continue
modules.extend(device.moduleName.split(":"))
return modules
def get_timeout(ks, default = None):
if not hasattr(ks.handler.bootloader, "timeout"):
return default
if ks.handler.bootloader.timeout is None:
return default
return int(ks.handler.bootloader.timeout)
def get_kernel_args(ks, default = "ro rd.live.image"):
if not hasattr(ks.handler.bootloader, "appendLine"):
return default
if ks.handler.bootloader.appendLine is None:
return default
return "%s %s" %(default, ks.handler.bootloader.appendLine)
def get_menu_args(ks, default = ""):
if not hasattr(ks.handler.bootloader, "menus"):
return default
if ks.handler.bootloader.menus in (None, ""):
return default
return "%s" % ks.handler.bootloader.menus
def get_default_kernel(ks, default = None):
if not hasattr(ks.handler.bootloader, "default"):
return default
if not ks.handler.bootloader.default:
return default
return ks.handler.bootloader.default
def get_repos(ks, repo_urls=None):
repos = {}
for repo in ks.handler.repo.repoList:
inc = []
if hasattr(repo, "includepkgs"):
inc.extend(repo.includepkgs)
exc = []
if hasattr(repo, "excludepkgs"):
exc.extend(repo.excludepkgs)
baseurl = repo.baseurl
mirrorlist = repo.mirrorlist
if repo_urls and repo.name in repo_urls:
baseurl = repo_urls[repo.name]
mirrorlist = None
if repos.has_key(repo.name):
msger.warning("Overriding already specified repo %s" %(repo.name,))
proxy = None
if hasattr(repo, "proxy"):
proxy = repo.proxy
proxy_username = None
if hasattr(repo, "proxy_username"):
proxy_username = repo.proxy_username
proxy_password = None
if hasattr(repo, "proxy_password"):
proxy_password = repo.proxy_password
        debuginfo = False
        if hasattr(repo, "debuginfo"):
            debuginfo = repo.debuginfo
        source = False
        if hasattr(repo, "source"):
            source = repo.source
        gpgkey = None
        if hasattr(repo, "gpgkey"):
            gpgkey = repo.gpgkey
        disable = False
        if hasattr(repo, "disable"):
            disable = repo.disable
ssl_verify = True
if hasattr(repo, "ssl_verify"):
ssl_verify = repo.ssl_verify == "yes"
nocache = False
if hasattr(repo, "nocache"):
nocache = repo.nocache
cost = None
if hasattr(repo, "cost"):
cost = repo.cost
priority = None
if hasattr(repo, "priority"):
priority = repo.priority
repos[repo.name] = (repo.name, baseurl, mirrorlist, inc, exc,
proxy, proxy_username, proxy_password, debuginfo,
source, gpgkey, disable, ssl_verify, nocache,
cost, priority)
return repos.values()
def convert_method_to_repo(ks):
try:
ks.handler.repo.methodToRepo()
except (AttributeError, kserrors.KickstartError):
pass
def get_attachment(ks, required=()):
return ks.handler.attachment.packageList + list(required)
def get_pre_packages(ks, required=()):
return ks.handler.prepackages.packageList + list(required)
def get_packages(ks, required=()):
return ks.handler.packages.packageList + list(required)
def get_groups(ks, required=()):
return ks.handler.packages.groupList + list(required)
def get_excluded(ks, required=()):
return ks.handler.packages.excludedList + list(required)
def get_partitions(ks):
return ks.handler.partition.partitions
def ignore_missing(ks):
return ks.handler.packages.handleMissing == ksconstants.KS_MISSING_IGNORE
def exclude_docs(ks):
return ks.handler.packages.excludeDocs
def inst_langs(ks):
if hasattr(ks.handler.packages, "instLange"):
return ks.handler.packages.instLange
elif hasattr(ks.handler.packages, "instLangs"):
return ks.handler.packages.instLangs
return ""
def get_post_scripts(ks):
scripts = []
for s in ks.handler.scripts:
if s.type != ksparser.KS_SCRIPT_POST:
continue
scripts.append(s)
return scripts
def add_repo(ks, repostr):
args = repostr.split()
repoobj = ks.handler.repo.parse(args[1:])
if repoobj and repoobj not in ks.handler.repo.repoList:
ks.handler.repo.repoList.append(repoobj)
def remove_all_repos(ks):
while len(ks.handler.repo.repoList) != 0:
del ks.handler.repo.repoList[0]
def remove_duplicate_repos(ks):
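    # Walk the repo list with two cursors: for repo i, delete every later
    # repo j that shares its name or baseurl, then advance i once no more
    # duplicates of it remain.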
i = 0
j = i + 1
while True:
if len(ks.handler.repo.repoList) < 2:
break
if i >= len(ks.handler.repo.repoList) - 1:
break
name = ks.handler.repo.repoList[i].name
baseurl = ks.handler.repo.repoList[i].baseurl
if j < len(ks.handler.repo.repoList):
if (ks.handler.repo.repoList[j].name == name or \
ks.handler.repo.repoList[j].baseurl == baseurl):
del ks.handler.repo.repoList[j]
else:
j += 1
if j >= len(ks.handler.repo.repoList):
i += 1
j = i + 1
else:
i += 1
j = i + 1
def resolve_groups(creatoropts, repometadata):
iszypp = False
if 'zypp' == creatoropts['pkgmgr']:
iszypp = True
ks = creatoropts['ks']
for repo in repometadata:
""" Mustn't replace group with package list if repo is ready for the
corresponding package manager.
"""
if iszypp and repo["patterns"]:
continue
if not iszypp and repo["comps"]:
continue
# But we also must handle such cases, use zypp but repo only has comps,
# use yum but repo only has patterns, use zypp but use_comps is true,
# use yum but use_comps is false.
groupfile = None
if iszypp and repo["comps"]:
groupfile = repo["comps"]
get_pkglist_handler = misc.get_pkglist_in_comps
if not iszypp and repo["patterns"]:
groupfile = repo["patterns"]
get_pkglist_handler = misc.get_pkglist_in_patterns
if groupfile:
i = 0
while True:
if i >= len(ks.handler.packages.groupList):
break
pkglist = get_pkglist_handler(
ks.handler.packages.groupList[i].name,
groupfile)
if pkglist:
del ks.handler.packages.groupList[i]
for pkg in pkglist:
if pkg not in ks.handler.packages.packageList:
ks.handler.packages.packageList.append(pkg)
else:
i = i + 1
| ronan22/mic | mic/kickstart/__init__.py | Python | gpl-2.0 | 31,009 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, cstr, fmt_money
import unittest
class TestFmtMoney(unittest.TestCase):
def test_standard(self):
frappe.db.set_default("number_format", "#,###.##")
self.assertEqual(fmt_money(100), "100.00")
self.assertEqual(fmt_money(1000), "1,000.00")
self.assertEqual(fmt_money(10000), "10,000.00")
self.assertEqual(fmt_money(100000), "100,000.00")
self.assertEqual(fmt_money(1000000), "1,000,000.00")
self.assertEqual(fmt_money(10000000), "10,000,000.00")
self.assertEqual(fmt_money(100000000), "100,000,000.00")
self.assertEqual(fmt_money(1000000000), "1,000,000,000.00")
def test_negative(self):
frappe.db.set_default("number_format", "#,###.##")
self.assertEqual(fmt_money(-100), "-100.00")
self.assertEqual(fmt_money(-1000), "-1,000.00")
self.assertEqual(fmt_money(-10000), "-10,000.00")
self.assertEqual(fmt_money(-100000), "-100,000.00")
self.assertEqual(fmt_money(-1000000), "-1,000,000.00")
self.assertEqual(fmt_money(-10000000), "-10,000,000.00")
self.assertEqual(fmt_money(-100000000), "-100,000,000.00")
self.assertEqual(fmt_money(-1000000000), "-1,000,000,000.00")
def test_decimal(self):
frappe.db.set_default("number_format", "#.###,##")
self.assertEqual(fmt_money(-100), "-100,00")
self.assertEqual(fmt_money(-1000), "-1.000,00")
self.assertEqual(fmt_money(-10000), "-10.000,00")
self.assertEqual(fmt_money(-100000), "-100.000,00")
self.assertEqual(fmt_money(-1000000), "-1.000.000,00")
self.assertEqual(fmt_money(-10000000), "-10.000.000,00")
self.assertEqual(fmt_money(-100000000), "-100.000.000,00")
self.assertEqual(fmt_money(-1000000000), "-1.000.000.000,00")
def test_lacs(self):
frappe.db.set_default("number_format", "#,##,###.##")
self.assertEqual(fmt_money(100), "100.00")
self.assertEqual(fmt_money(1000), "1,000.00")
self.assertEqual(fmt_money(10000), "10,000.00")
self.assertEqual(fmt_money(100000), "1,00,000.00")
self.assertEqual(fmt_money(1000000), "10,00,000.00")
self.assertEqual(fmt_money(10000000), "1,00,00,000.00")
self.assertEqual(fmt_money(100000000), "10,00,00,000.00")
self.assertEqual(fmt_money(1000000000), "1,00,00,00,000.00")
def test_no_precision(self):
frappe.db.set_default("number_format", "#,###")
self.assertEqual(fmt_money(0.3), "0")
self.assertEqual(fmt_money(100.3), "100")
self.assertEqual(fmt_money(1000.3), "1,000")
self.assertEqual(fmt_money(10000.3), "10,000")
self.assertEqual(fmt_money(-0.3), "0")
self.assertEqual(fmt_money(-100.3), "-100")
self.assertEqual(fmt_money(-1000.3), "-1,000")
def test_currency_precision(self):
frappe.db.set_default("currency_precision", "4")
frappe.db.set_default("number_format", "#,###.##")
self.assertEqual(fmt_money(100), "100.00")
self.assertEqual(fmt_money(1000), "1,000.00")
self.assertEqual(fmt_money(10000), "10,000.00")
self.assertEqual(fmt_money(100000), "100,000.00")
self.assertEqual(fmt_money(1000000), "1,000,000.00")
self.assertEqual(fmt_money(10000000), "10,000,000.00")
self.assertEqual(fmt_money(100000000), "100,000,000.00")
self.assertEqual(fmt_money(1000000000), "1,000,000,000.00")
self.assertEqual(fmt_money(100.23), "100.23")
self.assertEqual(fmt_money(1000.456), "1,000.456")
self.assertEqual(fmt_money(10000.7890), "10,000.789")
self.assertEqual(fmt_money(100000.1234), "100,000.1234")
self.assertEqual(fmt_money(1000000.3456), "1,000,000.3456")
self.assertEqual(fmt_money(10000000.3344567), "10,000,000.3345")
self.assertEqual(fmt_money(100000000.37827268), "100,000,000.3783")
self.assertEqual(fmt_money(1000000000.2718272637), "1,000,000,000.2718")
frappe.db.set_default("currency_precision", "")
def test_currency_precision_de_format(self):
frappe.db.set_default("currency_precision", "4")
frappe.db.set_default("number_format", "#.###,##")
self.assertEqual(fmt_money(100), "100,00")
self.assertEqual(fmt_money(1000), "1.000,00")
self.assertEqual(fmt_money(10000), "10.000,00")
self.assertEqual(fmt_money(100000), "100.000,00")
self.assertEqual(fmt_money(100.23), "100,23")
self.assertEqual(fmt_money(1000.456), "1.000,456")
frappe.db.set_default("currency_precision", "")
if __name__=="__main__":
frappe.connect()
	unittest.main()
 | neilLasrado/frappe | frappe/tests/test_fmt_money.py | Python | mit | 4,407 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from collections import Mapping
from pyLibrary.collections import MAX
from pyLibrary.debugs.logs import Log
from pyLibrary.dot import listwrap, Dict, wrap, literal_field, set_default, coalesce, Null, split_field, join_field
from pyLibrary.queries import qb, es09
from pyLibrary.queries.dimensions import Dimension
from pyLibrary.queries.domains import PARTITION, SimpleSetDomain, is_keyword
from pyLibrary.queries.es14.util import aggregates1_4
from pyLibrary.queries.expressions import simplify_esfilter, qb_expression_to_ruby, get_all_vars
from pyLibrary.times.timer import Timer
def is_aggsop(es, query):
es.cluster.get_metadata()
if (es.cluster.version.startswith("1.4.") or es.cluster.version.startswith("1.5.")) and (query.edges or query.groupby or any(a != None and a != "none" for a in listwrap(query.select).aggregate)):
return True
return False
def es_aggsop(es, frum, query):
select = listwrap(query.select)
es_query = Dict()
new_select = Dict()
formula = []
for s in select:
if s.aggregate == "count" and (s.value == None or s.value == "."):
s.pull = "doc_count"
elif is_keyword(s.value):
new_select[literal_field(s.value)] += [s]
else:
formula.append(s)
	for field_name, many in new_select.items():
		if len(many) > 1:
			canonical_name = literal_field(many[0].name)
			es_query.aggs[canonical_name].stats.field = many[0].value
for s in many:
if s.aggregate == "count":
s.pull = canonical_name + ".count"
else:
s.pull = canonical_name + "." + aggregates1_4[s.aggregate]
else:
s = many[0]
s.pull = literal_field(s.value) + ".value"
es_query.aggs[literal_field(s.value)][aggregates1_4[s.aggregate]].field = s.value
for i, s in enumerate(formula):
new_select[unicode(i)] = s
s.pull = literal_field(s.name) + ".value"
es_query.aggs[literal_field(s.name)][aggregates1_4[s.aggregate]].script = qb_expression_to_ruby(s.value)
decoders = [AggsDecoder(e, query) for e in coalesce(query.edges, query.groupby, [])]
start = 0
for d in decoders:
es_query = d.append_query(es_query, start)
start += d.num_columns
if query.where:
filter = simplify_esfilter(query.where)
es_query = Dict(
aggs={"_filter": set_default({"filter": filter}, es_query)}
)
if len(split_field(frum.name)) > 1:
es_query = wrap({
"size": 0,
"aggs": {"_nested": set_default({
"nested": {
"path": join_field(split_field(frum.name)[1::])
}
}, es_query)}
})
with Timer("ES query time") as es_duration:
result = es09.util.post(es, es_query, query.limit)
try:
formatter, groupby_formatter, aggop_formatter, mime_type = format_dispatch[query.format]
if query.edges:
output = formatter(decoders, result.aggregations, start, query, select)
elif query.groupby:
output = groupby_formatter(decoders, result.aggregations, start, query, select)
else:
output = aggop_formatter(decoders, result.aggregations, start, query, select)
output.meta.es_response_time = es_duration.seconds
output.meta.content_type = mime_type
output.meta.es_query = es_query
return output
except Exception, e:
if query.format not in format_dispatch:
Log.error("Format {{format|quote}} not supported yet", format= query.format, cause=e)
Log.error("Some problem", e)
class AggsDecoder(object):
def __new__(cls, *args, **kwargs):
e = args[0]
if e.value and e.domain.type == "default":
return object.__new__(DefaultDecoder, e.copy())
if e.value and e.domain.type in PARTITION:
return object.__new__(SetDecoder, e)
if isinstance(e.domain.dimension, Dimension):
e.domain = e.domain.dimension.getDomain()
return object.__new__(SetDecoder, e)
if e.value and e.domain.type == "time":
return object.__new__(TimeDecoder, e)
if e.value and e.domain.type == "duration":
return object.__new__(DurationDecoder, e)
elif e.value and e.domain.type == "range":
return object.__new__(RangeDecoder, e)
elif not e.value and e.domain.dimension.fields:
# THIS domain IS FROM A dimension THAT IS A SIMPLE LIST OF fields
# JUST PULL THE FIELDS
fields = e.domain.dimension.fields
if isinstance(fields, Mapping):
return object.__new__(DimFieldDictDecoder, e)
else:
return object.__new__(DimFieldListDecoder, e)
else:
Log.error("domain type of {{type}} is not supported yet", type= e.domain.type)
def __init__(self, edge, query):
self.start = None
self.edge = edge
self.name = literal_field(self.edge.name)
def append_query(self, es_query, start):
Log.error("Not supported")
def count(self, row):
pass
def done_count(self):
pass
def get_value_from_row(self, row):
Log.error("Not implemented")
def get_value(self, index):
Log.error("Not implemented")
def get_index(self, row):
Log.error("Not implemented")
@property
def num_columns(self):
return 0
class SetDecoder(AggsDecoder):
def append_query(self, es_query, start):
self.start = start
return wrap({"aggs": {
"_match": set_default({"terms": {"field": self.edge.value}}, es_query),
"_missing": set_default({"missing": {"field": self.edge.value}}, es_query),
}})
def get_value(self, index):
return self.edge.domain.getKeyByIndex(index)
def get_value_from_row(self, row):
return row[self.start].key
def get_index(self, row):
try:
part = row[self.start]
if part == None:
return len(self.edge.domain.partitions)
return self.edge.domain.getIndexByKey(part.key)
except Exception, e:
Log.error("problem", e)
@property
def num_columns(self):
return 1
def _range_composer(edge, domain, es_query, to_float):
# USE RANGES
_min = coalesce(domain.min, MAX(domain.partitions.min))
_max = coalesce(domain.max, MAX(domain.partitions.max))
if is_keyword(edge.value):
calc = {"field": edge.value}
else:
calc = {"script": qb_expression_to_ruby(edge.value)}
if is_keyword(edge.value):
missing_range = {"or": [
{"range": {edge.value: {"lt": to_float(_min)}}},
{"range": {edge.value: {"gte": to_float(_max)}}}
]}
else:
missing_range = {"script": {"script": qb_expression_to_ruby({"or": [
{"lt": [edge.value, to_float(_min)]},
{"gt": [edge.value, to_float(_max)]},
]})}}
return wrap({"aggs": {
"_match": set_default(
{"range": calc},
{"range": {"ranges": [{"from": to_float(p.min), "to": to_float(p.max)} for p in domain.partitions]}},
es_query
),
"_missing": set_default(
{"filter": {"or": [
missing_range,
{"missing": {"field": get_all_vars(edge.value)}}
]}},
es_query
),
}})
class TimeDecoder(AggsDecoder):
def append_query(self, es_query, start):
self.start = start
return _range_composer(self.edge, self.edge.domain, es_query, lambda x: x.unix)
def get_value(self, index):
return self.edge.domain.getKeyByIndex(index)
def get_index(self, row):
domain = self.edge.domain
part = row[self.start]
if part == None:
return len(domain.partitions)
f = coalesce(part["from"], part.key)
t = coalesce(part.to, part.key)
if f == None or t == None:
return len(domain.partitions)
else:
for p in domain.partitions:
				if p.min.unix <= f < p.max.unix:
return p.dataIndex
sample = part.copy
sample.buckets = None
Log.error("Expecting to find {{part}}", part=sample)
@property
def num_columns(self):
return 1
class DurationDecoder(AggsDecoder):
def append_query(self, es_query, start):
self.start = start
return _range_composer(self.edge, self.edge.domain, es_query, lambda x: x.seconds)
def get_value(self, index):
return self.edge.domain.getKeyByIndex(index)
def get_index(self, row):
domain = self.edge.domain
part = row[self.start]
if part == None:
return len(domain.partitions)
f = coalesce(part["from"], part.key)
t = coalesce(part.to, part.key)
if f == None or t == None:
return len(domain.partitions)
else:
for p in domain.partitions:
if p.min.seconds <= f < p.max.seconds:
return p.dataIndex
sample = part.copy
sample.buckets = None
Log.error("Expecting to find {{part}}", part=sample)
@property
def num_columns(self):
return 1
class RangeDecoder(AggsDecoder):
def append_query(self, es_query, start):
self.start = start
return _range_composer(self.edge, self.edge.domain, es_query, lambda x: x)
def get_value(self, index):
return self.edge.domain.getKeyByIndex(index)
def get_index(self, row):
domain = self.edge.domain
part = row[self.start]
if part == None:
return len(domain.partitions)
f = coalesce(part["from"], part.key)
t = coalesce(part.to, part.key)
if f == None or t == None:
return len(domain.partitions)
else:
for p in domain.partitions:
				if p.min <= f < p.max:
return p.dataIndex
sample = part.copy
sample.buckets = None
Log.error("Expecting to find {{part}}", part=sample)
@property
def num_columns(self):
return 1
class DefaultDecoder(SetDecoder):
# FOR DECODING THE default DOMAIN TYPE (UNKNOWN-AT-QUERY-TIME SET OF VALUES)
def __init__(self, edge, query):
AggsDecoder.__init__(self, edge, query)
self.edge = self.edge.copy()
self.edge.allowNulls = False # SINCE WE DO NOT KNOW THE DOMAIN, WE HAVE NO SENSE OF WHAT IS OUTSIDE THAT DOMAIN, allowNulls==True MAKES NO SENSE
self.edge.domain.partitions = set()
self.edge.domain.limit = coalesce(self.edge.domain.limit, query.limit, 10)
def append_query(self, es_query, start):
self.start = start
return wrap({"aggs": {
"_match": set_default(
{"terms": {
"field": self.edge.value,
"size": self.edge.domain.limit
}},
es_query
),
"_missing": set_default({"missing": {"field": self.edge.value}}, es_query),
}})
def count(self, row):
part = row[self.start]
if part == None:
self.edge.allowNulls = True # OK! WE WILL ALLOW NULLS
else:
self.edge.domain.partitions.add(part.key)
def done_count(self):
self.edge.domain = SimpleSetDomain(
partitions=qb.sort(self.edge.domain.partitions)
)
@property
def num_columns(self):
return 1
class DimFieldListDecoder(DefaultDecoder):
def __init__(self, edge, query):
DefaultDecoder.__init__(self, edge, query)
self.fields = edge.domain.dimension.fields
def append_query(self, es_query, start):
self.start = start
for i, v in enumerate(self.fields):
es_query = wrap({"aggs": {
"_match": set_default({"terms": {"field": v}}, es_query),
"_missing": set_default({"missing": {"field": v}}, es_query),
}})
if self.edge.domain.where:
filter = simplify_esfilter(self.edge.domain.where)
es_query = {"aggs": {"_filter": set_default({"filter": filter}, es_query)}}
return es_query
def count(self, row):
part = row[self.start:self.start + len(self.fields):]
value = tuple(p.key for p in part)
self.edge.domain.partitions.add(value)
def done_count(self):
self.edge.domain = SimpleSetDomain(
key="value",
partitions=[{"value": v, "dataIndex": i} for i, v in enumerate(qb.sort(self.edge.domain.partitions, range(len(self.fields))))]
)
def get_index(self, row):
parts = self.edge.domain.partitions
find = tuple(p.key for p in row[self.start:self.start + self.num_columns:])
for p in parts:
if p.value == find:
return p.dataIndex
else:
return len(parts)
def _get_sub(self, aggs, coord):
domain = self.edge.domain
buckets = aggs[self.name].buckets
for b in buckets:
c = domain.getIndexByKey(b.key)
yield (c, b)
@property
def num_columns(self):
return len(self.fields)
class DimFieldDictDecoder(DefaultDecoder):
def __init__(self, edge, query):
DefaultDecoder.__init__(self, edge, query)
self.fields = edge.domain.dimension.fields.items()
def append_query(self, es_query, start):
self.start = start
for i, (k, v) in enumerate(self.fields):
es_query = wrap({"aggs": {
"_match": set_default({"terms": {"field": v}}, es_query),
"_missing": set_default({"missing": {"field": v}}, es_query),
}})
if self.edge.domain.where:
filter = simplify_esfilter(self.edge.domain.where)
es_query = {"aggs": {"_filter": set_default({"filter": filter}, es_query)}}
return es_query
def count(self, row):
part = row[self.start:self.start + len(self.fields):]
value = {k: p.key for (k, v), p in zip(self.fields, part)}
self.edge.domain.partitions.add(value)
def done_count(self):
self.edge.domain = SimpleSetDomain(
key="value",
partitions=[{"value": v, "dataIndex": i} for i, v in enumerate(qb.sort(self.edge.domain.partitions, [k for k, v in self.fields]))]
)
def get_index(self, row):
parts = self.edge.domain.partitions
find = tuple(p.key for p in row[self.start:self.start + self.num_columns:])
for p in parts:
if p.value == find:
return p.dataIndex
else:
return len(parts)
def _get_sub(self, aggs, coord):
domain = self.edge.domain
buckets = aggs[self.name].buckets
for b in buckets:
c = domain.getIndexByKey(b.key)
yield (c, b)
@property
def num_columns(self):
		# self.fields holds (name, field) pairs (a list built in __init__),
		# so its length is the number of columns.
		return len(self.fields)
def aggs_iterator(aggs, decoders):
"""
DIG INTO ES'S RECURSIVE aggs DATA-STRUCTURE:
RETURN AN ITERATOR OVER THE EFFECTIVE ROWS OF THE RESULTS
"""
depth = decoders[-1].start + decoders[-1].num_columns
parts = [None] * depth
def _aggs_iterator(agg, d):
deeper = coalesce(agg._filter, agg._nested)
while deeper:
agg = deeper
deeper = coalesce(agg._filter, agg._nested)
if d > 0:
for b in agg._match.buckets:
parts[d] = b
for a in _aggs_iterator(b, d - 1):
yield a
parts[d] = Null
for b in agg._other.buckets:
for a in _aggs_iterator(b, d - 1):
yield a
b = agg._missing
if b.doc_count:
for a in _aggs_iterator(b, d - 1):
yield a
else:
for b in agg._match.buckets:
parts[d] = b
if b.doc_count:
yield b
parts[d] = Null
for b in agg._other.buckets:
if b.doc_count:
yield b
b = agg._missing
if b.doc_count:
yield b
for a in _aggs_iterator(aggs, depth - 1):
yield parts, a
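# Illustrative only: for a response produced by several decoders, each yielded
# (parts, leaf) pair carries the innermost bucket plus the chain of enclosing
# buckets in `parts`, indexed by each decoder's `start` position, so callers
# such as count_dim() below can ask every decoder for its value of that row.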
def count_dim(aggs, decoders):
if any(isinstance(d, DefaultDecoder) for d in decoders):
# ENUMERATE THE DOMAINS, IF UNKNOWN AT QUERY TIME
for row, agg in aggs_iterator(aggs, decoders):
for d in decoders:
d.count(row)
for d in decoders:
d.done_count()
new_edges = wrap([d.edge for d in decoders])
return new_edges
format_dispatch = {}
from pyLibrary.queries.es14.format import format_cube
_ = format_cube
| klahnakoski/intermittents | pyLibrary/queries/es14/aggs.py | Python | mpl-2.0 | 17,393 |
#!/usr/bin/env python
import os, sys, argparse
from operator import itemgetter
sys.path.append(
os.path.abspath(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.pardir, os.pardir, os.pardir)))
from themis.metaprograms.utils import StatQuery
import themis.metaprograms.utils as utils
def get_list_time_series_queries():
worker_query = StatQuery(
"COLL",
("phase_name", None),
("epoch", None),
("stage_name", None),
("id", None),
("collection_stat_name", None))
def worker_match_function(match, data):
phase = match["phase_name"]
stage = match["stage_name"]
worker_id = str(match["id"])
stat_name = match["collection_stat_name"]
if "time_series_keys" not in data:
data["time_series_keys"] = set()
time_series_key = (phase, stage, worker_id, stat_name)
data["time_series_keys"].add(time_series_key)
worker_query.match_processor_function = worker_match_function
tracker_query = StatQuery(
"COLL",
("phase_name", None),
("epoch", None),
("logger_name", None),
("collection_stat_name", None))
def tracker_match_function(match, data):
phase = match["phase_name"]
stage = match["logger_name"]
stat_name = match["collection_stat_name"]
if "time_series_keys" not in data:
data["time_series_keys"] = set()
time_series_key = (phase, stage, None, stat_name)
data["time_series_keys"].add(time_series_key)
tracker_query.match_processor_function = tracker_match_function
return (worker_query, tracker_query)
def time_series_tuple_to_str(time_series_tuple):
if time_series_tuple[2] == None:
return "%s.%s" % (".".join(time_series_tuple[0:2]), ".".join(
time_series_tuple[3:]))
else:
return ".".join(time_series_tuple)
def list_time_series(
experiment_log_dir, plot_spec_strings, output_filename, verbose):
queries = []
queries.extend(get_list_time_series_queries())
time_series_data = utils.process_queries(
queries, experiment_log_dir, verbose)
time_series_keys = time_series_data["time_series_keys"]
output_fp = open(output_filename, 'w')
for time_series_key in sorted(time_series_keys, key=itemgetter(0,1,3,2)):
print >> output_fp, time_series_tuple_to_str(time_series_key)
output_fp.close()
def main():
parser = argparse.ArgumentParser(
description="List available time-series data from a log directory")
parser.add_argument("experiment_log_dir", help="a directory containing log "
"data for an experiment")
parser.add_argument(
"--output", "-o", help="output filename (default: %(default)s)",
default="time_series_keys.txt")
parser.add_argument(
"-v", "--verbose", default=False, action="store_true",
help="Print detailed log processing information")
args = parser.parse_args()
list_time_series(experiment_log_dir = args.experiment_log_dir,
plot_spec_strings = ["*.*.*.*.*"],
output_filename = args.output, verbose=args.verbose)
if __name__ == "__main__":
main()
| mconley99/themis_tritonsort | src/scripts/themis/metaprograms/list_time_series/list_time_series.py | Python | bsd-3-clause | 3,286 |
import csv
import h5py
from collections import defaultdict
import numpy as np
import os
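# Minimal usage sketch (paths are placeholders, not from this repository):
#
#   extract_detections("/data/ct_npy", "/data/detections.csv", "/data/crops.h5")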
def extract_detections(path_data, path_csv, path_output):
    """Crop a CUBE_SZ**3 voxel cube around every detection listed in the CSV
    and write the crops per scan to an HDF5 file, logging any failures."""
    CUBE_SZ = 54
# parse the detections csv
with open(path_csv, "r") as f:
reader = csv.reader(f)
list_detections = list(reader)
list_detections.pop(0)
detections = defaultdict(lambda: [])
for entry in list_detections:
detections[entry[0]].append((int(float(entry[1])),
int(float(entry[2])),
int(float(entry[3]))))
log_filename = "{}.log".format(os.path.splitext(path_output)[0])
# crop detected ROIs and write to hdf5 file
i_counter = 0
n_counter = len(detections.keys())
with open(log_filename, "w") as log_file:
with h5py.File(path_output, "w") as f_h5:
for id, coords in detections.items():
# load CT scan (ct_scan is [z, x, y])
try:
# detections.csv seriesuid string is missing chars
file_id = [f for f in os.listdir(path_data) if id in f]
assert(len(file_id) == 1)
id_ = os.path.splitext(file_id[0])[0]
ct_scan = np.load(os.path.join(path_data,
"{}.npy".format(id_)))
crops = np.zeros((CUBE_SZ, CUBE_SZ, CUBE_SZ, len(coords)))
# pad ct_scan and crop
i_counter += 1
if i_counter % 10 == 0:
print("*** extracting {}/{}" \
.format(i_counter, n_counter))
ct_scan_shape = ct_scan.shape
ct_scan = np.pad(ct_scan, CUBE_SZ, "constant")
for i, xyz in enumerate(coords):
# fix offset in x,y (detections in 512x512 window)
xyz = list(xyz)
xyz[0] = xyz[0] - int((512-ct_scan_shape[1])/2)
xyz[1] = xyz[1] - int((512-ct_scan_shape[2])/2)
try:
crops[:, :, :, i] = ct_scan[
xyz[2] + CUBE_SZ : xyz[2] + 2 * CUBE_SZ,
xyz[0] + CUBE_SZ : xyz[0] + 2 * CUBE_SZ,
xyz[1] + CUBE_SZ : xyz[1] + 2 * CUBE_SZ]
except ValueError:
print("*** ERROR in {}".format(i_counter))
log_file.write("Error in {}, shape: {}, xyz: {}\n" \
.format(id_, ct_scan_shape, xyz))
# write
f_h5.create_dataset(id_,
shape=crops.shape,
dtype=np.int16,
data=crops)
except IOError:
print("*** ERROR in {}".format(i_counter))
log_file.write("File {}.npy not found!\n" \
                                   .format(id, ct_scan_shape, xyz))
 | syagev/kaggle_dsb | kaggle/util.py | Python | apache-2.0 | 3,228 |
"""
Common task fixtures.
"""
#
# Fimfarchive, preserves stories from Fimfiction.
# Copyright (C) 2020 Joakim Soderlund
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from copy import deepcopy
from typing import Dict
from fimfarchive.exceptions import InvalidStoryError
from fimfarchive.converters import Converter
from fimfarchive.fetchers import Fetcher
from fimfarchive.stories import Story
from fimfarchive.utils import Empty
class DummyConverer(Converter):
"""
Converter that increments a counter.
"""
def __call__(self, story: Story) -> Story:
meta = deepcopy(story.meta)
meta['conversions'] += 1
return story.merge(meta=meta)
class DummyFetcher(Fetcher):
"""
Fetcher with local instance storage.
"""
def __init__(self):
"""
Constructor.
"""
self.stories: Dict[int, Story] = dict()
def add(self, key, date, flavors=(), data=Empty):
"""
Adds a story to the fetcher.
"""
meta = {
'id': key,
'title': f't{key}',
'date_modified': date,
'conversions': 0,
'author': {
'id': key,
'name': f'n{key}'
},
'chapters': [
{'id': key},
],
}
if data is Empty:
text = f'd{key}'
data = text.encode()
story = Story(
key=key,
fetcher=self,
meta=meta,
data=data,
flavors=flavors,
)
self.stories[key] = story
return story
def fetch(self, key, prefetch_meta=None, prefetch_data=None):
"""
Returns a previously stored story.
"""
try:
return self.stories[key]
except KeyError:
raise InvalidStoryError()
def fetch_data(self, key):
"""
Raises exception for missing data.
"""
raise InvalidStoryError()
def fetch_meta(self, key):
"""
Raises exception for missing meta.
"""
raise InvalidStoryError()
def __iter__(self):
"""
Yields all previously stored stories.
"""
for key in sorted(self.stories.keys()):
yield self.stories[key]
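# Illustrative only: how these fixtures are typically exercised in a test
# (the assertions are examples, not taken from this file):
#
#   fetcher = DummyFetcher()
#   story = fetcher.add(key=1, date=1)
#   assert fetcher.fetch(1) is story
#   assert DummyConverer()(story).meta['conversions'] == 1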
| JockeTF/fimfarchive | tests/tasks/conftest.py | Python | gpl-3.0 | 2,907 |
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api import extensions
from neutron.api.v2 import attributes
PROFILE_ID = 'n1kv:profile_id'
MULTICAST_IP = 'n1kv:multicast_ip'
SEGMENT_ADD = 'n1kv:segment_add'
SEGMENT_DEL = 'n1kv:segment_del'
MEMBER_SEGMENTS = 'n1kv:member_segments'
EXTENDED_ATTRIBUTES_2_0 = {
'networks': {
PROFILE_ID: {'allow_post': True, 'allow_put': False,
'validate': {'type:regex': attributes.UUID_PATTERN},
'default': attributes.ATTR_NOT_SPECIFIED,
'is_visible': True},
MULTICAST_IP: {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'is_visible': True},
SEGMENT_ADD: {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'is_visible': True},
SEGMENT_DEL: {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'is_visible': True},
MEMBER_SEGMENTS: {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'is_visible': True},
},
'ports': {
PROFILE_ID: {'allow_post': True, 'allow_put': False,
'validate': {'type:regex': attributes.UUID_PATTERN},
'default': attributes.ATTR_NOT_SPECIFIED,
'is_visible': True}
}
}
class N1kv(extensions.ExtensionDescriptor):
"""Extension class supporting N1kv profiles.
This class is used by neutron's extension framework to make
metadata about the n1kv profile extension available to
clients. No new resources are defined by this extension. Instead,
the existing network resource's request and response messages are
extended with attributes in the n1kv profile namespace.
To create a network based on n1kv profile using the CLI with admin rights:
(shell) net-create --tenant_id <tenant-id> <net-name> \
--n1kv:profile_id <id>
(shell) port-create --tenant_id <tenant-id> <net-name> \
--n1kv:profile_id <id>
With admin rights, network dictionaries returned from CLI commands
will also include n1kv profile attributes.
"""
@classmethod
def get_name(cls):
return "n1kv"
@classmethod
def get_alias(cls):
return "n1kv"
@classmethod
def get_description(cls):
return "Expose network profile"
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/n1kv/api/v2.0"
@classmethod
def get_updated(cls):
return "2012-11-15T10:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| samsu/neutron | plugins/cisco/extensions/n1kv.py | Python | apache-2.0 | 3,484 |
# Copyright 2017 Mark Dickinson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pcgrandom import Random
class TestDistributions(unittest.TestCase):
"""Minimal tests for the continuous distributions in the Distributions
mixin class. Since the code is unaltered from that in the standard library,
we leave it to the standard library tests to do more effective
distribution-specific testing.
"""
def setUp(self):
self.gen = Random(12345)
def test_uniform(self):
self.assertIsInstance(self.gen.uniform(1.0, 2.0), float)
def test_triangular(self):
self.assertIsInstance(self.gen.triangular(1.0, 2.0), float)
self.assertIsInstance(self.gen.triangular(1.0, 2.0, 1.3), float)
# Exercise corner cases where low == high.
self.assertEqual(self.gen.triangular(1.3, 1.3), 1.3)
self.assertEqual(self.gen.triangular(1.3, 1.3, 1.3), 1.3)
# Corner cases where mid == low or mid == high
self.assertIsInstance(self.gen.triangular(1.0, 2.0, 2.0), float)
self.assertIsInstance(self.gen.triangular(1.0, 2.0, 1.0), float)
def test_normalvariate(self):
sample = [self.gen.normalvariate(10.0, 5.0) for _ in range(10)]
for elt in sample:
self.assertIsInstance(elt, float)
def test_lognormvariate(self):
sample = [self.gen.lognormvariate(10.0, 5.0) for _ in range(10)]
for elt in sample:
self.assertIsInstance(elt, float)
def test_expovariate(self):
sample = [self.gen.expovariate(3.2) for _ in range(10)]
for elt in sample:
self.assertIsInstance(elt, float)
def test_vonmisesvariate(self):
sample = [self.gen.vonmisesvariate(0.0, 2.0) for _ in range(20)]
for elt in sample:
self.assertIsInstance(elt, float)
# Corner case where kappa is tiny.
sample = [self.gen.vonmisesvariate(0.0, 2e-10) for _ in range(20)]
for elt in sample:
self.assertIsInstance(elt, float)
def test_gammavariate(self):
with self.assertRaises(ValueError):
self.gen.gammavariate(1.0, 0.0)
with self.assertRaises(ValueError):
self.gen.gammavariate(0.0, 1.0)
# The implementation has separate cases for alpha less than,
# equal to, or greater than 1. Make sure we exercise all three.
sample = [self.gen.gammavariate(0.7, 1.3) for _ in range(10)]
for elt in sample:
self.assertIsInstance(elt, float)
# Generate enough deviates to exercise all the branches.
sample = [self.gen.gammavariate(0.2, 1.3) for _ in range(100)]
for elt in sample:
self.assertIsInstance(elt, float)
sample = [self.gen.gammavariate(1.0, 1.3) for _ in range(10)]
for elt in sample:
self.assertIsInstance(elt, float)
sample = [self.gen.gammavariate(1.3, 1.3) for _ in range(10)]
for elt in sample:
self.assertIsInstance(elt, float)
def test_gauss(self):
sample = [self.gen.gauss(3.7, 1.3) for _ in range(10)]
for elt in sample:
self.assertIsInstance(elt, float)
def test_betavariate(self):
sample = [self.gen.betavariate(0.7, 1.3) for _ in range(10)]
for elt in sample:
self.assertIsInstance(elt, float)
def test_paretovariate(self):
sample = [self.gen.paretovariate(0.5) for _ in range(10)]
for elt in sample:
self.assertIsInstance(elt, float)
def test_weibullvariate(self):
sample = [self.gen.weibullvariate(700.0, 2.5) for _ in range(10)]
for elt in sample:
self.assertIsInstance(elt, float)
| mdickinson/pcgrandom | pcgrandom/test/test_distributions.py | Python | apache-2.0 | 4,229 |
import gtk
import BaseObject
class PropertyBox(gtk.HBox):
def __init__(self, prop):
gtk.HBox.__init__(self, False, 5)
self.property = prop
label = gtk.Label(prop.nickName)
self.pack_start(label, False, False)
if prop.type == BaseObject.PROPTYPE_COLOR:
b = gtk.ColorButton()
b.set_color(prop.value)
b.set_title(prop.nickName)
b.connect("color-set", self.colorChanged)
self.pack_start(b, False, False)
elif prop.type == BaseObject.PROPTYPE_FLOAT:
a = gtk.Adjustment(prop.value, 0.0, 10000.0, 0.1, 0.5)
b = gtk.SpinButton(a, digits = 1)
b.connect("value-changed", self.floatSpinChanged)
self.pack_start(b)
elif prop.type == BaseObject.PROPTYPE_INTEGER:
a = gtk.Adjustment(prop.value, 0, 10000, 1, 10)
b = gtk.SpinButton(a)
b.connect("value-changed", self.intSpinChanged)
self.pack_start(b)
elif prop.type == BaseObject.PROPTYPE_STRING:
e = gtk.Entry()
e.set_text(prop.value)
e.connect("changed", self.stringChanged)
self.pack_start(e)
elif prop.type == BaseObject.PROPTYPE_STRINGS:
c = gtk.combo_box_new_text()
for v in prop.vals:
c.append_text(v)
c.set_active(prop.vals.index(prop.value))
c.connect("changed", self.stringsChanged)
self.pack_start(c)
elif prop.type == BaseObject.PROPTYPE_IMAGE:
self.pack_start(gtk.Image())
elif prop.type == BaseObject.PROPTYPE_IMAGES:
self.pack_start(gtk.ComboBox())
def colorChanged(self, widget):
self.property.setValue(widget.get_color())
def floatSpinChanged(self, widget):
self.property.setValue(widget.get_value())
def intSpinChanged(self, widget):
self.property.setValue(widget.get_value_as_int())
def stringChanged(self, widget):
self.property.setValue(widget.get_text())
def stringsChanged(self, widget):
self.property.setValue(widget.get_active_text())
class PropertiesPane(gtk.VBox):
def __init__(self):
gtk.VBox.__init__(self)
self.set_spacing(5)
self.titleLabel = gtk.Label("Properties")
self.pack_start(self.titleLabel, False, False)
scroll = gtk.ScrolledWindow()
scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
		# non-homogeneous packing with 5px spacing, matching the HBox rows above
		self.box = gtk.VBox(False, 5)
self.box.set_border_width(5)
scroll.add_with_viewport(self.box)
self.pack_start(scroll)
def loadProperties(self, shape):
self.box.foreach(self.box.remove)
self.titleLabel.set_text(shape.name)
for p in shape.properties:
self.loadProperty(p)
def loadProperty(self, prop):
box = PropertyBox(prop)
self.box.pack_start(box, False, False)
self.box.show_all()
| fredmorcos/attic | projects/opengrafik/opengrafik_20090719_python_gtk/PropertiesPane.py | Python | isc | 2,534 |
# Recursive Fibonacci-style sequence: f(0) = f(1) = 1, f(n) = f(n-1) + f(n-2).
f = lambda n: 1 if n < 2 else f(n-1) + f(n-2)
# First m terms of the sequence.
g = lambda m: map(f, range(0,m))
# 1 + 1 + 2 + 3 + 5 + 8 + 13 == 33
print sum(g(7))
| Dawny33/Code | Hackerrank/Codeathon Unleashed/test.py | Python | gpl-3.0 | 96 |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import importlib
import inspect
import json
import logging
import os
import sys
import types
from azure.common.credentials import BasicTokenAuthentication
from azure.core.credentials import AccessToken
from azure.identity import (AzureCliCredential, ClientSecretCredential,
ManagedIdentityCredential)
from azure.identity._credentials.azure_cli import _run_command
from msrestazure.azure_cloud import AZURE_PUBLIC_CLOUD
from requests import HTTPError
from c7n_azure import constants
from c7n_azure.utils import (C7nRetryPolicy, ManagedGroupHelper,
ResourceIdParser, StringUtils,
cost_query_override_api_version,
custodian_azure_send_override,
get_keyvault_auth_endpoint, get_keyvault_secret,
log_response_data)
from functools import lru_cache
log = logging.getLogger('custodian.azure.session')
class AzureCredential:
def __init__(self, cloud_endpoints, authorization_file=None, subscription_id_override=None):
# type: (*str, *str) -> None
if authorization_file:
with open(authorization_file) as json_file:
self._auth_params = json.load(json_file)
else:
self._auth_params = {
'client_id': os.environ.get(constants.ENV_CLIENT_ID),
'client_secret': os.environ.get(constants.ENV_CLIENT_SECRET),
'access_token': os.environ.get(constants.ENV_ACCESS_TOKEN),
'tenant_id': os.environ.get(constants.ENV_TENANT_ID),
'use_msi': bool(os.environ.get(constants.ENV_USE_MSI)),
'subscription_id': os.environ.get(constants.ENV_SUB_ID),
'keyvault_client_id': os.environ.get(constants.ENV_KEYVAULT_CLIENT_ID),
'keyvault_secret_id': os.environ.get(constants.ENV_KEYVAULT_SECRET_ID),
'enable_cli_auth': True
}
self._auth_params['authority'] = cloud_endpoints.endpoints.active_directory
keyvault_client_id = self._auth_params.get('keyvault_client_id')
keyvault_secret_id = self._auth_params.get('keyvault_secret_id')
# If user provided KeyVault secret, we will pull auth params information from it
try:
if keyvault_secret_id:
self._auth_params.update(
json.loads(
get_keyvault_secret(
keyvault_client_id,
keyvault_secret_id)
))
except HTTPError as e:
e.message = 'Failed to retrieve SP credential ' \
'from Key Vault with client id: {0}'.format(keyvault_client_id)
raise
self._credential = None
if self._auth_params.get('access_token') is not None:
auth_name = 'Access Token'
pass
elif (self._auth_params.get('client_id') and
self._auth_params.get('client_secret') and
self._auth_params.get('tenant_id')
):
auth_name = 'Principal'
self._credential = ClientSecretCredential(
client_id=self._auth_params['client_id'],
client_secret=self._auth_params['client_secret'],
tenant_id=self._auth_params['tenant_id'],
authority=self._auth_params['authority'])
elif self._auth_params.get('use_msi'):
auth_name = 'MSI'
self._credential = ManagedIdentityCredential(
client_id=self._auth_params.get('client_id'))
elif self._auth_params.get('enable_cli_auth'):
auth_name = 'Azure CLI'
self._credential = AzureCliCredential()
account_info, error = _run_command('az account show --output json')
account_json = json.loads(account_info)
self._auth_params['subscription_id'] = account_json['id']
self._auth_params['tenant_id'] = account_json['tenantId']
if error is not None:
raise Exception('Unable to query TenantId and SubscriptionId')
if subscription_id_override is not None:
self._auth_params['subscription_id'] = subscription_id_override
self._subscription_id = self._auth_params['subscription_id']
self._tenant_id = self._auth_params['tenant_id']
log.info('Authenticated [%s | %s%s]',
auth_name, self.subscription_id,
' | Authorization File' if authorization_file else '')
def get_token(self, *scopes, **kwargs):
# Access Token is used only in tests realistically because
# KeyVault, Storage and mgmt plane requires separate tokens.
# TODO: Should we scope this to tests only?
if (self._auth_params.get('access_token')):
return AccessToken(self._auth_params['access_token'], expires_on=0)
try:
return self._credential.get_token(*scopes, **kwargs)
except Exception as e:
log.error('Failed to authenticate.\nMessage: {}'.format(e))
exit(1)
# This is temporary until all SDKs we use are upgraded to Track 2
# List of legacy users:
# - DNS
# - Record Set (uses DNS SDK)
# - Azure Graph
def legacy_credentials(self, scope):
# Track 2 SDKs use tuple
token = self.get_token((scope + '.default'))
return BasicTokenAuthentication(token={'access_token': token.token})
@property
def tenant_id(self):
# type: (None) -> str
return self._tenant_id
@property
def auth_params(self):
# type: (None) -> str
return self._auth_params
@property
def subscription_id(self):
# type: (None) -> str
return self._subscription_id
class Session:
def __init__(self, subscription_id=None, authorization_file=None,
cloud_endpoints=None, resource_endpoint_type=constants.DEFAULT_AUTH_ENDPOINT):
"""
:param subscription_id: If provided overrides environment variables.
:param authorization_file: Path to file populated from 'get_functions_auth_string'
:param cloud_endpoints: List of endpoints for specified Azure Cloud. Defaults to public.
:param auth_endpoint: Resource endpoint for OAuth token.
"""
self._provider_cache = {}
self.subscription_id_override = subscription_id
self.credentials = None
self.authorization_file = authorization_file
self.cloud_endpoints = cloud_endpoints or AZURE_PUBLIC_CLOUD
self.resource_endpoint_type = resource_endpoint_type
self.resource_endpoint = self.get_auth_endpoint(resource_endpoint_type)
self.storage_endpoint = self.cloud_endpoints.suffixes.storage_endpoint
def _initialize_session(self):
"""
Creates a session using available authentication type.
"""
# Only run once
if self.credentials is not None:
return
try:
self.credentials = AzureCredential(
self.cloud_endpoints,
authorization_file=self.authorization_file,
subscription_id_override=self.subscription_id_override)
except Exception as e:
if hasattr(e, 'message'):
log.error(e.message)
else:
log.exception("Failed to authenticate.")
sys.exit(1)
if self.credentials is None:
log.error('Failed to authenticate.')
sys.exit(1)
def get_session_for_resource(self, resource):
return Session(
subscription_id=self.subscription_id_override,
authorization_file=self.authorization_file,
cloud_endpoints=self.cloud_endpoints,
resource_endpoint_type=resource)
@lru_cache()
def client(self, client, vault_url=None):
self._initialize_session()
service_name, client_name = client.rsplit('.', 1)
svc_module = importlib.import_module(service_name)
klass = getattr(svc_module, client_name)
klass_parameters = inspect.signature(klass).parameters
legacy = False
if 'credentials' in klass_parameters and 'tenant_id' in klass_parameters:
client = klass(credentials=self.credentials.legacy_credentials(self.resource_endpoint),
tenant_id=self.credentials.tenant_id,
base_url=self.resource_endpoint)
legacy = True
elif 'credentials' in klass_parameters:
client = klass(credentials=self.credentials.legacy_credentials(self.resource_endpoint),
subscription_id=self.credentials.subscription_id,
base_url=self.cloud_endpoints.endpoints.resource_manager)
legacy = True
else:
client_args = {
'credential': self.credentials,
'raw_response_hook': log_response_data,
'retry_policy': C7nRetryPolicy(),
'credential_scopes': [self.resource_endpoint + ".default"]
}
# TODO: remove when fixed: https://github.com/Azure/azure-sdk-for-python/issues/17351
# This workaround will replace used api-version for costmanagement requests
# 2020-06-01 is not supported, but 2019-11-01 is working as expected.
if client == 'azure.mgmt.costmanagement.CostManagementClient':
client_args['raw_request_hook'] = cost_query_override_api_version
if 'subscription_id' in klass_parameters:
client_args['subscription_id'] = self.subscription_id
client_args['base_url'] = self.cloud_endpoints.endpoints.resource_manager
elif 'vault_url' in klass_parameters:
client_args['vault_url'] = vault_url
client = klass(**client_args)
if legacy:
# Override send() method to log request limits & custom retries
service_client = client._client
service_client.orig_send = service_client.send
service_client.send = types.MethodType(custodian_azure_send_override, service_client)
# Don't respect retry_after_header to implement custom retries
service_client.config.retry_policy.policy.respect_retry_after_header = False
return client
@property
def subscription_id(self):
self._initialize_session()
return self.credentials.subscription_id
def get_credentials(self):
self._initialize_session()
return self.credentials
def get_subscription_id(self):
self._initialize_session()
return self.credentials.subscription_id
def get_function_target_subscription_name(self):
self._initialize_session()
if constants.ENV_FUNCTION_MANAGEMENT_GROUP_NAME in os.environ:
return os.environ[constants.ENV_FUNCTION_MANAGEMENT_GROUP_NAME]
return os.environ.get(constants.ENV_FUNCTION_SUB_ID, self.subscription_id)
def get_function_target_subscription_ids(self):
self._initialize_session()
if constants.ENV_FUNCTION_MANAGEMENT_GROUP_NAME in os.environ:
return ManagedGroupHelper.get_subscriptions_list(
os.environ[constants.ENV_FUNCTION_MANAGEMENT_GROUP_NAME], self)
return [os.environ.get(constants.ENV_FUNCTION_SUB_ID, self.subscription_id)]
def resource_api_version(self, resource_id):
""" latest non-preview api version for resource """
namespace = ResourceIdParser.get_namespace(resource_id)
resource_type = ResourceIdParser.get_resource_type(resource_id)
cache_id = namespace + resource_type
if cache_id in self._provider_cache:
return self._provider_cache[cache_id]
resource_client = self.client('azure.mgmt.resource.ResourceManagementClient')
provider = resource_client.providers.get(namespace)
# The api version may be directly provided
if not provider.resource_types and resource_client.providers.api_version:
return resource_client.providers.api_version
rt = next((t for t in provider.resource_types
if StringUtils.equal(t.resource_type, resource_type)), None)
if rt and rt.api_versions:
versions = [v for v in rt.api_versions if 'preview' not in v.lower()]
api_version = versions[0] if versions else rt.api_versions[0]
self._provider_cache[cache_id] = api_version
return api_version
def get_tenant_id(self):
self._initialize_session()
return self.credentials.tenant_id
def get_functions_auth_string(self, target_subscription_id):
"""Build auth json string for deploying Azure Functions.
Look for dedicated Functions environment variables or fall
back to normal Service Principal variables.
"""
self._initialize_session()
function_auth_variables = [
constants.ENV_FUNCTION_TENANT_ID,
constants.ENV_FUNCTION_CLIENT_ID,
constants.ENV_FUNCTION_CLIENT_SECRET
]
required_params = ['client_id', 'client_secret', 'tenant_id']
function_auth_params = {k: v for k, v in self.credentials.auth_params.items()
if k in required_params and v is not None}
function_auth_params['subscription_id'] = target_subscription_id
# Use dedicated function env vars if available
if all(k in os.environ for k in function_auth_variables):
function_auth_params['client_id'] = os.environ[constants.ENV_FUNCTION_CLIENT_ID]
function_auth_params['client_secret'] = os.environ[constants.ENV_FUNCTION_CLIENT_SECRET]
function_auth_params['tenant_id'] = os.environ[constants.ENV_FUNCTION_TENANT_ID]
# Verify SP authentication parameters
if any(k not in function_auth_params.keys() for k in required_params):
raise NotImplementedError(
"Service Principal credentials are the only "
"supported auth mechanism for deploying functions.")
return json.dumps(function_auth_params, indent=2)
def get_auth_endpoint(self, endpoint):
if endpoint == constants.VAULT_AUTH_ENDPOINT:
return get_keyvault_auth_endpoint(self.cloud_endpoints)
elif endpoint == constants.STORAGE_AUTH_ENDPOINT:
# These endpoints are not Cloud specific, but the suffixes are
return constants.STORAGE_AUTH_ENDPOINT
else:
return getattr(self.cloud_endpoints.endpoints, endpoint)
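# Minimal usage sketch (illustrative only; the client path string mirrors the
# one used in resource_api_version() above, everything else is an assumption):
#
#   session = Session()
#   resource_client = session.client(
#       'azure.mgmt.resource.ResourceManagementClient')
#   print(session.get_subscription_id())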
| alfredgamulo/cloud-custodian | tools/c7n_azure/c7n_azure/session.py | Python | apache-2.0 | 14,857 |
import numpy as np
'''Classic fourth-order Runge-Kutta (RK4) integrator for a three-variable system.'''
def rk4(f, y0, a0, I0, dt, T):
	"""Integrate (u, a, I) from t=0 to T with step dt, where f(u, a, I, t)
	returns the three time derivatives.  Returns arrays of u, a, I and t."""
mysolu=[]
mysola=[]
mysolI=[]
mytime=[]
t=0
un=y0
an=a0
In=I0
mytime.append(t)
mysolu.append(un)
mysola.append(an)
mysolI.append(In)
while t<=T:
k1_e,k1_a,k1_I=f(un,an,In,t)
k2_e,k2_a,k2_I=f(un+(dt/2)*k1_e,an+(dt/2)*k1_a,In+(dt/2)*k1_I,t+dt/2)
k3_e,k3_a,k3_I=f(un+(dt/2)*k2_e,an+(dt/2)*k2_a,In+(dt/2)*k2_I,t+dt/2)
k4_e,k4_a,k4_I=f(un+dt*k3_e,an+dt*k3_a,In+dt*k3_I,t+dt)
un=un+(dt/6)*(k1_e+2*k2_e+2*k3_e+k4_e)
an=an+(dt/6)*(k1_a+2*k2_a+2*k3_a+k4_a)
In=In+(dt/6)*(k1_I+2*k2_I+2*k3_I+k4_I)
t=t+dt
mysolu.append(un)
mysola.append(an)
mysolI.append(In)
mytime.append(t)
print(t)
return np.array(mysolu),np.array(mysola),np.array(mysolI),np.array(mytime)
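# Minimal usage sketch (illustrative only; the right-hand side and the step
# sizes below are made up, not taken from the experiments using this module):
if __name__ == '__main__':
	def decay(u, a, I, t):
		# independent linear decay of each variable
		return -u, -0.5 * a, -0.1 * I
	u, a, I, time = rk4(decay, 1.0, 1.0, 1.0, 0.01, 1.0)
	print("final values: u=%s a=%s I=%s" % (u[-1], a[-1], I[-1]))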
| ulisespereira/PereiraBrunel2016 | figure1/myintegrator.py | Python | gpl-2.0 | 754 |
#!/usr/local/bin/python3
import cgi
print("Content-type: text/html")
print('''
<!DOCTYPE html>
<html>
<head>
<title>Linux</title>
</head>
<body>
<h1>Linux</h1>
<p>Linux</p>
<p>This is the article for Linux</p>
</body>
</html>
''')
| Secretmapper/updevcamp-session-2-dist | form/cgi-bin/lectures/simple/linux.py | Python | mit | 268 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, date
from openerp import tools
from openerp.osv import osv, fields
AVAILABLE_STATUS = [
('concluido', u'Concluído'),
('cursando', u'Cursando'),
('trancado', u'Trancado'),
]
AVAILABLE_LANGUAGE_STATUS = [
('concluido', u'Nativo'),
('cursando', u'Domínio'),
('trancado', u'Fluente'),
('trancado', u'Intermediário'),
('trancado', u'Básico'),
]
AVAILABLE_EXAM_STATUS = [
('apto', u'Apto'),
('inapto', u'Inapto'),
('apto_outra', u'Apto a Outra Funcção'),
('inapto_outra', u'Inapto a Outra Função'),
]
AVAILABLE_GENDER = [
('m', u'Masculino'),
('f', u'Feminino'),
]
class hr_religion(osv.Model):
_name = 'hr.religion'
_description = 'Religion'
_order = 'name'
_columns = {
'name': fields.char(
'Religion',
size=128,
required=True,
translate=True),
}
_sql_constraints = [
('hr_religion_unique', 'unique(name)', u'Religião já existe.'),
]
class hr_employee(osv.Model):
_inherit = 'hr.employee'
def _get_transportation_cost(
self,
cr,
uid,
ids,
field_name,
args,
context=None):
res = {}
tranportation_cost = 0.0
for emp in self.browse(cr, uid, ids, context=context):
for cost in emp.cartao_transp_ids:
tranportation_cost = tranportation_cost + cost.valor_cartao_transp
res[emp.id] = tranportation_cost
return res
_columns = {
'endereco_lotacao': fields.many2one('res.partner', u'Endereço de Lotação'),
'mariage_date': fields.date('Marriage Date'),
'is_handicapped': fields.boolean('Is Handicapped?'),
'handicap_description': fields.text('Handicap Description'),
'religion_id': fields.many2one('hr.religion', 'Religion'),
'ctps': fields.char(u'CTPS Nº', size=32),
'ctps_serie': fields.char(u'Série', size=32),
'pis_pasep': fields.char(u'PIS/Pasep', size=32),
'livro': fields.char('Livro', size=32),
'folha': fields.char('Folha', size=32),
'caixa': fields.char('Caixa', size=32),
'cpf': fields.char('CPF', size=32),
'cpf_orgao_expedidor_id': fields.many2one('orgao.expedidor', u'Órgão Expedidor'),
'eleitor': fields.char(u'Título Eleitor', size=32),
'habilitacao': fields.char(u'Habilitação', size=32),
'habilitacao_categoria_id': fields.many2one('categoria.habilitacao', 'Categoria'),
'habilitacao_validade': fields.date('Validade'),
'oab_estado_id': fields.many2one('res.country.state', 'Estado Federal'),
'oab_numero': fields.char('OAB', size=32),
'oab_validade': fields.date('Validade'),
'passaporte_orgao_expedidor_id': fields.many2one('orgao.expedidor', u'Órgão Expedidor'),
'passaporte_validade': fields.date('Validade'),
'rg': fields.char('RG', size=32),
'rg_digito': fields.char('Dg', size=1),
'rg_orgao_expedidor_id': fields.many2one('orgao.expedidor', u'Órgão Expedidor'),
'rge': fields.char('RGE', size=32),
'rge_digito': fields.char('Dg', size=1),
'rge_orgao_expedidor_id': fields.many2one('orgao.expedidor', u'Órgão Expedidor'),
'rg_militar': fields.char('RG Militar', size=32),
'rg_militar_digito': fields.char('Dg', size=1),
'rg_militar_orgao_expedidor_id': fields.many2one('orgao.expedidor', u'Órgão Expedidor'),
'conselho_regional_id': fields.many2one('conselho.regional', 'Conselho Regional'),
'conselho_regional_estado_id': fields.many2one('res.country.state', 'Estado Federal'),
'conselho_regional_numero': fields.char(u'CR Nº', size=32),
'conselho_regional_validade': fields.date('Validade'),
'escolaridade_ids': fields.one2many('employee.escolaridade', 'employee_id'),
'cursos_certificacoes_ids': fields.one2many('employee.cursos.certificacoes', 'employee_id'),
'exame_medico_ids': fields.one2many('employee.exame.medico', 'employee_id'),
'dependentes_ids': fields.one2many('employee.dependente', 'employee_id'),
'idiomas_ids': fields.one2many('employee.idioma', 'employee_id'),
'father_name': fields.char(u"Father's Name", size=256),
'mother_name': fields.char(u"Mother's Name", size=256),
'spec': fields.char("SPEC", size=64),
'health_insurance': fields.boolean('Health Insurance'),
'transportation_cost': fields.function(_get_transportation_cost, type='float', string=u'Condução'),
'cartao_transp_ids': fields.one2many('hr.employee.transportation.card', 'employee_id', u'Cartão Transporte'),
'meal_voucher': fields.float('Meal Voucher'),
'health_insurance_value': fields.float('Health Insurance Value'),
}
class hr_motivo(osv.Model):
_name = 'hr.motivo'
_description = u'Motivo'
_columns = {
'name': fields.char('Motivo', size=128, required=True, translate=True),
}
_sql_constraints = [
('hr_motivo_unique', 'unique(name)', u'Motivo já existe.'),
]
class employee_escolaridade(osv.Model):
_name = 'employee.escolaridade'
_description = 'Escolaridade'
_order = 'date_start desc'
_columns = {
'employee_id': fields.many2one(
'hr.employee',
'Employee',
required=True),
'grau_instrucao': fields.many2one(
'grau.instrucao',
u'Grau de Instrução',
required=True),
'instituicao': fields.many2one(
'hr.instituicao',
u'Instituição',
required=True),
'curso': fields.char(
'Curso',
size=128,
required=True),
'status': fields.selection(
AVAILABLE_STATUS,
'Status',
required=True,
translate=True),
'date_start': fields.date(
u'Data Início',
required=True),
'date_end': fields.date(u'Data Conclusão'),
'observations': fields.text(u'Observações'),
}
_sql_constraints = [
('date_sequence',
'CHECK ((date_end IS NOT NULL AND date_start <= date_end) OR date_end IS NULL)',
u'A data de início deve ser menor que a data de finalização !'),
]
class employee_cursos_certificacoes(osv.Model):
_name = 'employee.cursos.certificacoes'
_description = u'Cursos e Certificações'
_order = 'date_start desc'
_columns = {
'employee_id': fields.many2one(
'hr.employee',
'Employee',
required=True),
'tipo_evento': fields.many2one(
'tipo.evento',
'Tipo Evento',
required=True),
'instituicao': fields.many2one(
'hr.instituicao',
u'Instituição',
required=True),
'curso': fields.char(
'Curso',
size=128,
required=True),
'status': fields.selection(
AVAILABLE_STATUS,
'Status',
required=True,
translate=True),
'date_start': fields.date(
u'Data Início',
required=True),
'date_end': fields.date(u'Data Conclusão'),
'observations': fields.text(u'Observações'),
}
_sql_constraints = [
('date_sequence',
'CHECK ((date_end IS NOT NULL AND date_start <= date_end) OR date_end IS NULL)',
u'A data de início deve ser menor que a data de finalização !'),
]
class grau_instrucao(osv.Model):
_name = 'grau.instrucao'
_description = u'Grau de Instrução'
_columns = {
'name': fields.char('Grau', size=128, required=True, translate=True),
}
_sql_constraints = [
('grau_instrucao_unique',
'unique(name)',
u'Grau instrução já existe.'),
]
class tipo_evento(osv.Model):
_name = 'tipo.evento'
_description = 'Tipo Evento'
_columns = {
'name': fields.char(
'Tipo Evento',
size=128,
required=True,
translate=True),
}
_sql_constraints = [
('tipo_evento_unique', 'unique(name)', u'Tipo evento já existe.'),
]
class hr_instituicao(osv.Model):
_name = 'hr.instituicao'
_description = u'Instituição'
_columns = {
'name': fields.char(
u'Instituição',
size=128,
required=True,
translate=True),
}
_sql_constraints = [
('hr_instituicao_unique', 'unique(name)', u'Instituição já existe.'),
]
class employee_idioma(osv.Model):
_name = 'employee.idioma'
_description = 'Idiomas'
_order = 'date_start desc'
_columns = {
'employee_id': fields.many2one(
'hr.employee',
'Employee',
required=True),
'idioma': fields.selection(
tools.scan_languages(),
'Idioma',
required=True),
'instituicao': fields.many2one(
'hr.instituicao',
u'Instituição',
required=True),
'curso': fields.char(
'Curso',
size=128,
required=True),
'status': fields.selection(
AVAILABLE_LANGUAGE_STATUS,
'Status',
required=True,
translate=True),
'date_start': fields.date(
u'Data Início',
required=True),
'date_end': fields.date(u'Data Conclusão'),
'observations': fields.text(u'Observações'),
}
_sql_constraints = [
('date_sequence',
'CHECK ((date_end IS NOT NULL AND date_start <= date_end) OR date_end IS NULL)',
u'A data de início deve ser menor que a data de finalização !'),
]
class employee_exame_medico(osv.Model):
_name = 'employee.exame.medico'
_description = u'Exame Médico'
_order = 'data desc'
_columns = {
'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
'tipo_exame': fields.many2one('tipo.exame', 'Tipo Exame', required=True),
'data': fields.date('Data Início', required=True),
'clinica': fields.many2one('hr.clinica', u'Clínica', required=True),
'medico': fields.many2one('hr.medico', u'Médico', required=True),
'local': fields.many2one('hr.local', 'Local', required=True),
'os': fields.char('OS', size=16, required=True),
'status': fields.selection(AVAILABLE_EXAM_STATUS, 'Status', required=True, translate=True),
}
class tipo_exame(osv.Model):
_name = 'tipo.exame'
_description = 'Tipo Exame'
_columns = {
'name': fields.char(
'Tipo Exame',
size=128,
required=True,
translate=True),
}
_sql_constraints = [
('tipo_exame_unique', 'unique(name)', u'Tipo exame já existe.'),
]
class hr_clinica(osv.Model):
_name = 'hr.clinica'
_description = u'Clínica'
_columns = {
'name': fields.char(
u'Clínica',
size=128,
required=True,
translate=True),
}
_sql_constraints = [
('hr_clinica_unique', 'unique(name)', u'Clinica já existe.'),
]
class hr_medico(osv.Model):
_name = 'hr.medico'
_description = u'Médico'
_columns = {
'name': fields.char(
u'Médico',
size=128,
required=True,
translate=True),
}
_sql_constraints = [
('hr_medico_unique', 'unique(name)', u'Médico já existe.'),
]
class hr_local(osv.Model):
_name = 'hr.local'
_description = 'Local'
_columns = {
'name': fields.char('Local', size=128, required=True, translate=True),
}
_sql_constraints = [
('hr_local_unique', 'unique(name)', u'Local já existe.'),
]
class employee_dependente(osv.Model):
_name = 'employee.dependente'
_description = 'Dependente'
_order = 'birth_date desc'
def _age(self, birth_date):
now = date.today()
age_date = datetime.strptime(birth_date, '%Y-%m-%d').date()
age = now.year - age_date.year - (0 if (now.month > age_date.month or (
now.month == age_date.month and now.day >= age_date.day)) else 1)
return age
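    # Illustrative example for _age (hypothetical dates): with birth_date
    # '1990-06-15' and today being 2014-06-01, the month/day test fails, so
    # one year is subtracted: 2014 - 1990 - 1 = 23.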
def _calculate_age(self, cr, uid, ids, field_name, arg, context):
res = {}
for partner in self.browse(cr, uid, ids):
if partner.birth_date:
res[partner.id] = self._age(partner.birth_date)
else:
res[partner.id] = 0
return res
_columns = {
'name': fields.char(
'Nome',
size=256,
required=True),
'employee_id': fields.many2one(
'hr.employee',
'Employee',
required=True),
'sexo': fields.selection(
AVAILABLE_GENDER,
u'Género',
required=True,
translate=True),
'birth_date': fields.date(
'Data Nascimento',
required=True),
'parentesco': fields.many2one(
'hr.parentesco',
'Parentesco',
required=True),
'grau_instrucao': fields.many2one(
'grau.instrucao',
u'Grau de Instrução',
required=True),
'is_handicapped': fields.boolean(u'PCD - Pessoa Com Deficiência?'),
'handicap_description': fields.text(u'Descrever a deficiência'),
'mora_com': fields.boolean(u'Mora com o titular?'),
'age': fields.function(
_calculate_age,
method=True,
type='integer',
string='Age'),
}
class hr_parentesco(osv.Model):
_name = 'hr.parentesco'
_description = 'Parentesco'
_columns = {
'name': fields.char(
'Parentesco',
size=128,
required=True,
translate=True),
}
_sql_constraints = [
('hr_parentesco_unique', 'unique(name)', u'Parentesco já existe.'),
]
class conselho_regional(osv.Model):
_name = 'conselho.regional'
_description = 'Conselho Regional'
_columns = {
'name': fields.char(
'Conselho Regional',
size=128,
required=True,
translate=True),
}
_sql_constraints = [
('conselho_regional_unique',
'unique(name)',
u'Conselho regional já existe.'),
]
class orgao_expedidor(osv.Model):
_name = 'orgao.expedidor'
_description = u'Órgão Expedidor'
_columns = {
'name': fields.char(
u'Órgão Expedidor',
size=128,
required=True,
translate=True),
}
_sql_constraints = [
('orgao_expedidor_unique',
'unique(name)',
u'Órgão expedidor já existe.'),
]
class categoria_habilitacao(osv.Model):
_name = 'categoria.habilitacao'
_description = u'Categoria Habilitação'
_columns = {
'name': fields.char(
u'Categoria',
size=128,
required=True,
translate=True),
}
_sql_constraints = [
('categoria_habilitacao_unique',
'unique(name)',
u'Categoria habilitação já existe.'),
]
class hr_employee_transportation_card(osv.Model):
_name = 'hr.employee.transportation.card'
_description = u'Cartão Transporte'
_columns = {
'employee_id': fields.many2one('hr.employee', 'Employee'),
'name': fields.char(u'Cartão Transporte', size=64),
'tipo_cartao_id': fields.many2one('hr.employee.transp.card', u'Tipo'),
'valor_cartao_transp': fields.float(u'Valor'),
}
class hr_employee_transp_card(osv.Model):
_name = 'hr.employee.transp.card'
_description = u'Tipo Cartão'
_columns = {
'name': fields.char(u'Tipo', size=128, required=True, translate=True),
}
_sql_constraints = [
('hr_employee_transp_card_unique',
'unique(name)',
u'Tipo de cartão já existe.'),
]
| elego/tkobr-addons | unported/tko_l10n_br_hr/l10n_br_hr.py | Python | agpl-3.0 | 17,315 |
# Copyright (c) 2001-2006 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides wxPython event loop support for Twisted.
In order to use this support, simply do the following::
| from twisted.internet import wxreactor
| wxreactor.install()
Then, when your root wxApp has been created::
| from twisted.internet import reactor
| reactor.registerWxApp(yourApp)
| reactor.run()
Then use twisted.internet APIs as usual. Stop the event loop using
reactor.stop(), not yourApp.ExitMainLoop().
IMPORTANT: tests will fail when run under this reactor. This is
expected and probably does not reflect on the reactor's ability to run
real applications.
Maintainer: Itamar Shtull-Trauring
"""
import Queue
try:
from wx import PySimpleApp as wxPySimpleApp, CallAfter as wxCallAfter, \
Timer as wxTimer
except ImportError:
# older version of wxPython:
from wxPython.wx import wxPySimpleApp, wxCallAfter, wxTimer
from twisted.python import log, runtime
from twisted.internet import _threadedselect
class ProcessEventsTimer(wxTimer):
"""
Timer that tells wx to process pending events.
This is necessary on OS X, probably due to a bug in wx, if we want
wxCallAfters to be handled when modal dialogs, menus, etc. are open.
"""
def __init__(self, wxapp):
wxTimer.__init__(self)
self.wxapp = wxapp
def Notify(self):
"""
Called repeatedly by wx event loop.
"""
self.wxapp.ProcessPendingEvents()
class WxReactor(_threadedselect.ThreadedSelectReactor):
"""
wxPython reactor.
wxPython drives the event loop, select() runs in a thread.
"""
_stopping = False
def registerWxApp(self, wxapp):
"""
Register wxApp instance with the reactor.
"""
self.wxapp = wxapp
def _installSignalHandlersAgain(self):
"""
wx sometimes removes our own signal handlers, so re-add them.
"""
try:
# make _handleSignals happy:
import signal
signal.signal(signal.SIGINT, signal.default_int_handler)
except ImportError:
return
self._handleSignals()
def stop(self):
"""
Stop the reactor.
"""
if self._stopping:
return
self._stopping = True
_threadedselect.ThreadedSelectReactor.stop(self)
def _runInMainThread(self, f):
"""
Schedule function to run in main wx/Twisted thread.
Called by the select() thread.
"""
if hasattr(self, "wxapp"):
wxCallAfter(f)
else:
# wx shutdown but twisted hasn't
self._postQueue.put(f)
def _stopWx(self):
"""
Stop the wx event loop if it hasn't already been stopped.
Called during Twisted event loop shutdown.
"""
if hasattr(self, "wxapp"):
self.wxapp.ExitMainLoop()
def run(self, installSignalHandlers=True):
"""
Start the reactor.
"""
self._postQueue = Queue.Queue()
if not hasattr(self, "wxapp"):
log.msg("registerWxApp() was not called on reactor, "
"registering my own wxApp instance.")
self.registerWxApp(wxPySimpleApp())
# start select() thread:
self.interleave(self._runInMainThread,
installSignalHandlers=installSignalHandlers)
if installSignalHandlers:
self.callLater(0, self._installSignalHandlersAgain)
# add cleanup events:
self.addSystemEventTrigger("after", "shutdown", self._stopWx)
self.addSystemEventTrigger("after", "shutdown",
lambda: self._postQueue.put(None))
# On Mac OS X, work around wx bug by starting timer to ensure
# wxCallAfter calls are always processed. We don't wake up as
# often as we could since that uses too much CPU.
if runtime.platform.isMacOSX():
t = ProcessEventsTimer(self.wxapp)
t.Start(2) # wake up every 2ms
self.wxapp.MainLoop()
wxapp = self.wxapp
del self.wxapp
if not self._stopping:
# wx event loop exited without reactor.stop() being
# called. At this point events from select() thread will
# be added to _postQueue, but some may still be waiting
# unprocessed in wx, thus the ProcessPendingEvents()
# below.
self.stop()
wxapp.ProcessPendingEvents() # deal with any queued wxCallAfters
while 1:
try:
f = self._postQueue.get(timeout=0.01)
except Queue.Empty:
continue
else:
if f is None:
break
try:
f()
except:
log.err()
def install():
"""
Configure the twisted mainloop to be run inside the wxPython mainloop.
"""
reactor = WxReactor()
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
__all__ = ['install']
| sorenh/cc | vendor/Twisted-10.0.0/twisted/internet/wxreactor.py | Python | apache-2.0 | 5,270 |
# Copyright (C) 2016 Rafael Moura
import io
import sys
from parser import *
def main():
    if len(sys.argv) < 2:
        sys.stderr.write("usage: {} <source file>\n".format(sys.argv[0]))
        return 1
    try:
        source = open(sys.argv[1], "r")
    except Exception as err:
        sys.stderr.write("open file failed: " + str(err) + "\n")
        return 1
    lines = source.readlines()
    source.close()
    parse(lines)
    ofile = None
    try:
        ofile = open("output.bin", "wb")
        # 'binary' is provided by the parser module (imported via 'from parser import *')
        ofile.write(bytes(binary))
    except Exception as err:
        sys.stderr.write("Failed to write to output.bin: " + str(err) + "\n")
        return 1
    finally:
        if ofile is not None:
            ofile.close()
    return 0
if __name__ == "__main__":
    sys.exit(main())
| dhustkoder/EMT | OP_CPU_MEMORY/assembler/py_asm1/main.py | Python | gpl-3.0 | 495 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib as mpl
import seaborn as sb
import sys
import time
from scipy import stats
def id( x ):
return x
def cleanCSV( csv_in, csv_out ):
Data = pd.read_csv( csv_in )
dontWant = [
'OTHER PROPERTY',
'LOST PROPERTY',
'THEFT OF SERVICES',
'[INC - CASE DC USE ONLY]',
'RECOVERED PROPERTY',
'FRAUD',
'FORGERY',
'EMBEZZLE',
'ELUDING',
'BIAS INCIDENT',
'FALSE REPORT',
'PUBLIC NUISANCE',
'EXTORTION',
'OBSTRUCT',
'STAY OUT OF AREA OF DRUGS',
'PURSE SNATCH',
'FIREWORK',
'ESCAPE',
'PORNOGRAPHY',
'GAMBLE',
'STAY OUT OF AREA OF PROSTITUTION',
'HARBOR CALLs',
'HARBOR CALLS',
'Purse Snatch' ,
'Car Prowl',
'RECKLESS BURNING',
'Shoplifting',
'LOITERING',
'DISORDERLY CONDUCT',
'Bike Theft',
'ILLEGAL DUMPING'
]
Data.Summary = Data.Summary.map( lambda x:
( np.nan if x in dontWant else x ))
Data.dropna( subset=['Summary'], inplace=True)
Data.to_csv( csv_out, index=False )
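# Note: when the Summary column has no pre-existing missing values, the
# map-to-NaN/dropna pair above behaves the same as the more direct
#   Data = Data[~Data.Summary.isin(dontWant)]
# the longer form is kept because it is what the original analysis used.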
def main( argv ):
try:
program_name = argv[ 0 ]
csv_in = argv[ 1 ]
csv_out = argv[ 2 ]
except:
print( "Usage: 'python3 {} <csv_file> <map_file> <output_file>'".format( sys.argv[0] ) )
return
cleanCSV( csv_in, csv_out )
if __name__=='__main__':
main( sys.argv )
| CKPalk/SeattleCrime_DM | DataMining/Stats/data_reduction.py | Python | mit | 1,356 |
#!/usr/bin/env python
# encoding: utf-8
import os
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
    from setuptools import setup, find_packages
from tbone import __version__
def read(f):
return open(os.path.join(os.path.dirname(__file__), f)).read().strip()
setup(
name='tbone',
version=__version__,
description='Full-duplex RESTful APIs for asyncio web applications',
long_description= read('README.md'),
author="475 Cumulus Ltd.",
author_email='[email protected]',
url='https://github.com/475Cumulus/TBone',
license='MIT',
python_requires='>=3.5.0',
packages=find_packages(),
install_requires=[i.strip() for i in open("requirements.txt").readlines()],
tests_require=[],
classifiers=[
'Environment :: Web Environment',
'Framework :: AsyncIO',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Topic :: Internet :: WWW/HTTP',
],
)
| 475Cumulus/TBone | setup.py | Python | mit | 1,316 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'CustomUser.language'
db.delete_column('app_customuser', 'language')
# Changing field 'Tweet.user'
db.alter_column('app_tweet', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['app.CustomUser']))
def backwards(self, orm):
# Adding field 'CustomUser.language'
db.add_column('app_customuser', 'language',
self.gf('django.db.models.fields.CharField')(default='en', max_length=10),
keep_default=False)
# Changing field 'Tweet.user'
db.alter_column('app_tweet', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['social_auth.UserSocialAuth']))
models = {
'app.customuser': {
'Meta': {'object_name': 'CustomUser'},
'activity_interval': ('django.db.models.fields.IntegerField', [], {'default': '2419200'}),
'alwaysupdate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'configured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dead': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'half_dead': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'mail_interval': ('django.db.models.fields.IntegerField', [], {'default': '1209600'}),
'neverupdate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'next_check': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'nottodayupdate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publish_interval': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['social_auth.UserSocialAuth']", 'unique': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'wait_mail': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'app.tweet': {
'Meta': {'object_name': 'Tweet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'text': ('django_fields.fields.EncryptedCharField', [], {'unique': 'True', 'max_length': '293', 'cipher': "'AES'"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['app.CustomUser']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'social_auth.usersocialauth': {
'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'UserSocialAuth'},
'extra_data': ('social_auth.fields.JSONField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'social_auth'", 'to': "orm['auth.User']"})
}
}
complete_apps = ['app'] | eduherraiz/foowill | app/migrations/0006_auto__del_field_customuser_language__chg_field_tweet_user.py | Python | mit | 7,011 |
import json
import os
from datetime import datetime
from tempfile import NamedTemporaryFile
from time import strftime, strptime
from django.views.decorators.http import require_POST
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponseForbidden,\
HttpResponseRedirect, HttpResponseNotFound, HttpResponseBadRequest,\
HttpResponse
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.core.files.storage import FileSystemStorage
from django.core.files.storage import get_storage_class
from main.models import UserProfile, MetaData, TokenStorageModel
from odk_logger.models import XForm, Attachment
from odk_logger.views import download_jsonform
from odk_viewer.models import DataDictionary, ParsedInstance
from odk_viewer.pandas_mongo_bridge import NoRecordsFoundError
from utils.image_tools import image_url
from xls_writer import XlsWriter
from utils.logger_tools import response_with_mimetype_and_name,\
disposition_ext_and_date
from utils.viewer_tools import image_urls
from odk_viewer.tasks import create_async_export
from utils.user_auth import has_permission, get_xform_and_perms,\
helper_auth_helper
from utils.google import google_export_xls, redirect_uri
# TODO: using from main.views import api breaks the application, why?
from odk_viewer.models import Export
from utils.export_tools import generate_export, should_create_new_export
from utils.export_tools import kml_export_data
from utils.export_tools import newset_export_for
from utils.viewer_tools import export_def_from_filename
from utils.viewer_tools import create_attachments_zipfile
from utils.log import audit_log, Actions
from common_tags import SUBMISSION_TIME
def encode(time_str):
time = strptime(time_str, "%Y_%m_%d_%H_%M_%S")
return strftime("%Y-%m-%d %H:%M:%S", time)
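# For reference, encode("2013_01_15_06_30_00") returns "2013-01-15 06:30:00";
# this is the timestamp format expected by the start/end query parameters below.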
def dd_for_params(id_string, owner, request):
start = end = None
dd = DataDictionary.objects.get(id_string=id_string,
user=owner)
if request.GET.get('start'):
try:
start = encode(request.GET['start'])
except ValueError:
# bad format
return [False,
HttpResponseBadRequest(
_(u'Start time format must be YY_MM_DD_hh_mm_ss'))
]
dd.surveys_for_export = \
lambda d: d.surveys.filter(date_created__gte=start)
if request.GET.get('end'):
try:
end = encode(request.GET['end'])
except ValueError:
# bad format
return [False,
HttpResponseBadRequest(
_(u'End time format must be YY_MM_DD_hh_mm_ss'))
]
dd.surveys_for_export = \
lambda d: d.surveys.filter(date_created__lte=end)
if start and end:
dd.surveys_for_export = \
lambda d: d.surveys.filter(date_created__lte=end,
date_created__gte=start)
return [True, dd]
def parse_label_for_display(pi, xpath):
label = pi.data_dictionary.get_label(xpath)
if not type(label) == dict:
label = {'Unknown': label}
return label.items()
def average(values):
if len(values):
return sum(values, 0.0) / len(values)
return None
def map_view(request, username, id_string, template='map.html'):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
context = RequestContext(request)
context.content_user = owner
context.xform = xform
context.profile, created = UserProfile.objects.get_or_create(user=owner)
context.form_view = True
context.jsonform_url = reverse(download_jsonform,
kwargs={"username": username,
"id_string": id_string})
context.enketo_edit_url = reverse('edit_data',
kwargs={"username": username,
"id_string": id_string,
"data_id": 0})
context.enketo_add_url = reverse('enter_data',
kwargs={"username": username,
"id_string": id_string})
context.enketo_add_with_url = reverse('add_submission_with',
kwargs={"username": username,
"id_string": id_string})
context.mongo_api_url = reverse('mongo_view_api',
kwargs={"username": username,
"id_string": id_string})
context.delete_data_url = reverse('delete_data',
kwargs={"username": username,
"id_string": id_string})
context.mapbox_layer = MetaData.mapbox_layer_upload(xform)
audit = {
"xform": xform.id_string
}
audit_log(Actions.FORM_MAP_VIEWED, request.user, owner,
_("Requested map on '%(id_string)s'.")
% {'id_string': xform.id_string}, audit, request)
return render_to_response(template, context_instance=context)
def map_embed_view(request, username, id_string):
return map_view(request, username, id_string, template='map_embed.html')
def add_submission_with(request, username, id_string):
import uuid
import requests
from django.conf import settings
from django.template import loader, Context
from dpath import util as dpath_util
from dict2xml import dict2xml
def geopoint_xpaths(username, id_string):
d = DataDictionary.objects.get(user__username=username, id_string=id_string)
return [e.get_abbreviated_xpath()
for e in d.get_survey_elements()
if e.bind.get(u'type') == u'geopoint']
value = request.GET.get('coordinates')
xpaths = geopoint_xpaths(username, id_string)
xml_dict = {}
for path in xpaths:
dpath_util.new(xml_dict, path, value)
context = {'username': username,
'id_string': id_string,
'xml_content': dict2xml(xml_dict)}
instance_xml = loader.get_template("instance_add.xml").render(Context(context))
url = settings.ENKETO_API_INSTANCE_IFRAME_URL
return_url = reverse('thank_you_submission', kwargs={"username": username,
"id_string": id_string})
if settings.DEBUG:
openrosa_url = "https://dev.formhub.org/{}".format(username)
else:
openrosa_url = request.build_absolute_uri("/{}".format(username))
payload = {'return_url': return_url,
'form_id': id_string,
'server_url': openrosa_url,
'instance': instance_xml,
'instance_id': uuid.uuid4().hex}
r = requests.post(url, data=payload,
auth=(settings.ENKETO_API_TOKEN, ''), verify=False)
return HttpResponse(r.text, mimetype='application/json')
def thank_you_submission(request, username, id_string):
return HttpResponse("Thank You")
# TODO: do a good job of displaying hierarchical data
def survey_responses(request, instance_id):
pi = get_object_or_404(ParsedInstance, instance=instance_id)
xform, is_owner, can_edit, can_view = \
get_xform_and_perms(pi.instance.user.username,
pi.instance.xform.id_string, request)
# no access
if not (xform.shared_data or can_view or
request.session.get('public_link') == xform.uuid):
return HttpResponseRedirect('/')
data = pi.to_dict()
# get rid of keys with leading underscores
data_for_display = {}
for k, v in data.items():
if not k.startswith(u"_"):
data_for_display[k] = v
xpaths = data_for_display.keys()
xpaths.sort(cmp=pi.data_dictionary.get_xpath_cmp())
label_value_pairs = [
(parse_label_for_display(pi, xpath),
data_for_display[xpath]) for xpath in xpaths
]
languages = label_value_pairs[-1][0]
audit = {
"xform": xform.id_string,
"instance_id": instance_id
}
audit_log(
Actions.FORM_DATA_VIEWED, request.user, xform.user,
_("Requested survey with id '%(instance_id)s' on '%(id_string)s'.") %
{
'id_string': xform.id_string,
'instance_id': instance_id
}, audit, request)
return render_to_response('survey.html', {
'label_value_pairs': label_value_pairs,
'image_urls': image_urls(pi.instance),
'languages': languages,
'default_language': languages[0][0]
})
def data_export(request, username, id_string, export_type):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
helper_auth_helper(request)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
query = request.GET.get("query")
extension = export_type
# check if we should force xlsx
force_xlsx = request.GET.get('xls') != 'true'
if export_type == Export.XLS_EXPORT and force_xlsx:
extension = 'xlsx'
elif export_type == Export.CSV_ZIP_EXPORT:
extension = 'zip'
audit = {
"xform": xform.id_string,
"export_type": export_type
}
# check if we need to re-generate,
# we always re-generate if a filter is specified
if should_create_new_export(xform, export_type) or query or\
'start' in request.GET or 'end' in request.GET:
format_date_for_mongo = lambda x, datetime: datetime.strptime(
x, '%y_%m_%d_%H_%M_%S').strftime('%Y-%m-%dT%H:%M:%S')
# check for start and end params
if 'start' in request.GET or 'end' in request.GET:
if not query:
query = '{}'
query = json.loads(query)
query[SUBMISSION_TIME] = {}
try:
if request.GET.get('start'):
query[SUBMISSION_TIME]['$gte'] = format_date_for_mongo(
request.GET['start'], datetime)
if request.GET.get('end'):
query[SUBMISSION_TIME]['$lte'] = format_date_for_mongo(
request.GET['end'], datetime)
except ValueError:
return HttpResponseBadRequest(
_("Dates must be in the format YY_MM_DD_hh_mm_ss"))
else:
query = json.dumps(query)
try:
export = generate_export(
export_type, extension, username, id_string, None, query)
audit_log(
Actions.EXPORT_CREATED, request.user, owner,
_("Created %(export_type)s export on '%(id_string)s'.") %
{
'id_string': xform.id_string,
'export_type': export_type.upper()
}, audit, request)
except NoRecordsFoundError:
return HttpResponseNotFound(_("No records found to export"))
else:
export = newset_export_for(xform, export_type)
# log download as well
audit_log(
Actions.EXPORT_DOWNLOADED, request.user, owner,
_("Downloaded %(export_type)s export on '%(id_string)s'.") %
{
'id_string': xform.id_string,
'export_type': export_type.upper()
}, audit, request)
if not export.filename:
# tends to happen when using newset_export_for.
return HttpResponseNotFound("File does not exist!")
# get extension from file_path, exporter could modify to
# xlsx if it exceeds limits
path, ext = os.path.splitext(export.filename)
ext = ext[1:]
if request.GET.get('raw'):
id_string = None
response = response_with_mimetype_and_name(
Export.EXPORT_MIMES[ext], id_string, extension=ext,
file_path=export.filepath)
return response
@require_POST
def create_export(request, username, id_string, export_type):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
query = request.POST.get("query")
force_xlsx = request.POST.get('xls') != 'true'
# export options
group_delimiter = request.POST.get("options[group_delimiter]", '/')
if group_delimiter not in ['.', '/']:
return HttpResponseBadRequest(
_("%s is not a valid delimiter" % group_delimiter))
# default is True, so when dont_.. is yes
# split_select_multiples becomes False
split_select_multiples = request.POST.get(
"options[dont_split_select_multiples]", "no") == "no"
options = {
'group_delimiter': group_delimiter,
'split_select_multiples': split_select_multiples
}
try:
create_async_export(xform, export_type, query, force_xlsx, options)
except Export.ExportTypeError:
return HttpResponseBadRequest(
_("%s is not a valid export type" % export_type))
else:
audit = {
"xform": xform.id_string,
"export_type": export_type
}
audit_log(
Actions.EXPORT_CREATED, request.user, owner,
_("Created %(export_type)s export on '%(id_string)s'.") %
{
'export_type': export_type.upper(),
'id_string': xform.id_string,
}, audit, request)
return HttpResponseRedirect(reverse(
export_list,
kwargs={
"username": username,
"id_string": id_string,
"export_type": export_type
})
)
def _get_google_token(request, redirect_to_url):
token = None
if request.user.is_authenticated():
try:
ts = TokenStorageModel.objects.get(id=request.user)
except TokenStorageModel.DoesNotExist:
pass
else:
token = ts.token
elif request.session.get('access_token'):
token = request.session.get('access_token')
if token is None:
request.session["google_redirect_url"] = redirect_to_url
return HttpResponseRedirect(redirect_uri)
return token
def export_list(request, username, id_string, export_type):
if export_type == Export.GDOC_EXPORT:
redirect_url = reverse(
export_list,
kwargs={
'username': username, 'id_string': id_string,
'export_type': export_type})
token = _get_google_token(request, redirect_url)
if isinstance(token, HttpResponse):
return token
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
if should_create_new_export(xform, export_type):
try:
create_async_export(
xform, export_type, query=None, force_xlsx=True)
except Export.ExportTypeError:
return HttpResponseBadRequest(
_("%s is not a valid export type" % export_type))
context = RequestContext(request)
context.username = owner.username
context.xform = xform
# TODO: better output e.g. Excel instead of XLS
context.export_type = export_type
context.export_type_name = Export.EXPORT_TYPE_DICT[export_type]
exports = Export.objects.filter(xform=xform, export_type=export_type)\
.order_by('-created_on')
context.exports = exports
return render_to_response('export_list.html', context_instance=context)
def export_progress(request, username, id_string, export_type):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
# find the export entry in the db
export_ids = request.GET.getlist('export_ids')
exports = Export.objects.filter(xform=xform, id__in=export_ids)
statuses = []
for export in exports:
status = {
'complete': False,
'url': None,
'filename': None,
'export_id': export.id
}
if export.status == Export.SUCCESSFUL:
status['url'] = reverse(export_download, kwargs={
'username': owner.username,
'id_string': xform.id_string,
'export_type': export.export_type,
'filename': export.filename
})
status['filename'] = export.filename
if export.export_type == Export.GDOC_EXPORT and \
export.export_url is None:
redirect_url = reverse(
export_progress,
kwargs={
'username': username, 'id_string': id_string,
'export_type': export_type})
token = _get_google_token(request, redirect_url)
if isinstance(token, HttpResponse):
return token
status['url'] = None
try:
url = google_export_xls(
export.full_filepath, xform.title, token, blob=True)
except Exception, e:
status['error'] = True
status['message'] = e.message
else:
export.export_url = url
export.save()
status['url'] = url
# mark as complete if it either failed or succeeded but NOT pending
if export.status == Export.SUCCESSFUL \
or export.status == Export.FAILED:
status['complete'] = True
statuses.append(status)
return HttpResponse(
json.dumps(statuses), mimetype='application/json')
def export_download(request, username, id_string, export_type, filename):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
helper_auth_helper(request)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
# find the export entry in the db
export = get_object_or_404(Export, xform=xform, filename=filename)
if export_type == Export.GDOC_EXPORT and export.export_url is not None:
return HttpResponseRedirect(export.export_url)
ext, mime_type = export_def_from_filename(export.filename)
audit = {
"xform": xform.id_string,
"export_type": export.export_type
}
audit_log(
Actions.EXPORT_DOWNLOADED, request.user, owner,
_("Downloaded %(export_type)s export '%(filename)s' "
"on '%(id_string)s'.") %
{
'export_type': export.export_type.upper(),
'filename': export.filename,
'id_string': xform.id_string,
}, audit, request)
if request.GET.get('raw'):
id_string = None
default_storage = get_storage_class()()
if not isinstance(default_storage, FileSystemStorage):
return HttpResponseRedirect(default_storage.url(export.filepath))
basename = os.path.splitext(export.filename)[0]
response = response_with_mimetype_and_name(
mime_type, name=basename, extension=ext,
file_path=export.filepath, show_date=False)
return response
@require_POST
def delete_export(request, username, id_string, export_type):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
export_id = request.POST.get('export_id')
# find the export entry in the db
export = get_object_or_404(Export, id=export_id)
export.delete()
audit = {
"xform": xform.id_string,
"export_type": export.export_type
}
audit_log(
Actions.EXPORT_DOWNLOADED, request.user, owner,
_("Deleted %(export_type)s export '%(filename)s'"
" on '%(id_string)s'.") %
{
'export_type': export.export_type.upper(),
'filename': export.filename,
'id_string': xform.id_string,
}, audit, request)
return HttpResponseRedirect(reverse(
export_list,
kwargs={
"username": username,
"id_string": id_string,
"export_type": export_type
}))
def zip_export(request, username, id_string):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
helper_auth_helper(request)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
if request.GET.get('raw'):
id_string = None
attachments = Attachment.objects.filter(instance__xform=xform)
zip_file = create_attachments_zipfile(attachments)
audit = {
"xform": xform.id_string,
"export_type": Export.ZIP_EXPORT
}
audit_log(
Actions.EXPORT_CREATED, request.user, owner,
_("Created ZIP export on '%(id_string)s'.") %
{
'id_string': xform.id_string,
}, audit, request)
# log download as well
audit_log(
Actions.EXPORT_DOWNLOADED, request.user, owner,
_("Downloaded ZIP export on '%(id_string)s'.") %
{
'id_string': xform.id_string,
}, audit, request)
if request.GET.get('raw'):
id_string = None
response = response_with_mimetype_and_name('zip', id_string,
file_path=zip_file,
use_local_filesystem=True)
return response
def kml_export(request, username, id_string):
# read the locations from the database
context = RequestContext(request)
context.message = "HELLO!!"
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
helper_auth_helper(request)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
context.data = kml_export_data(id_string, user=owner)
response = \
render_to_response("survey.kml", context_instance=context,
mimetype="application/vnd.google-earth.kml+xml")
response['Content-Disposition'] = \
disposition_ext_and_date(id_string, 'kml')
audit = {
"xform": xform.id_string,
"export_type": Export.KML_EXPORT
}
audit_log(
Actions.EXPORT_CREATED, request.user, owner,
_("Created KML export on '%(id_string)s'.") %
{
'id_string': xform.id_string,
}, audit, request)
# log download as well
audit_log(
Actions.EXPORT_DOWNLOADED, request.user, owner,
_("Downloaded KML export on '%(id_string)s'.") %
{
'id_string': xform.id_string,
}, audit, request)
return response
def google_xls_export(request, username, id_string):
token = None
if request.user.is_authenticated():
try:
ts = TokenStorageModel.objects.get(id=request.user)
except TokenStorageModel.DoesNotExist:
pass
else:
token = ts.token
elif request.session.get('access_token'):
token = request.session.get('access_token')
if token is None:
request.session["google_redirect_url"] = reverse(
google_xls_export,
kwargs={'username': username, 'id_string': id_string})
return HttpResponseRedirect(redirect_uri)
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
valid, dd = dd_for_params(id_string, owner, request)
if not valid:
return dd
ddw = XlsWriter()
tmp = NamedTemporaryFile(delete=False)
ddw.set_file(tmp)
ddw.set_data_dictionary(dd)
temp_file = ddw.save_workbook_to_file()
temp_file.close()
url = google_export_xls(tmp.name, xform.title, token, blob=True)
os.unlink(tmp.name)
audit = {
"xform": xform.id_string,
"export_type": "google"
}
audit_log(
Actions.EXPORT_CREATED, request.user, owner,
_("Created Google Docs export on '%(id_string)s'.") %
{
'id_string': xform.id_string,
}, audit, request)
return HttpResponseRedirect(url)
def data_view(request, username, id_string):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
context = RequestContext(request)
context.owner = owner
context.xform = xform
audit = {
"xform": xform.id_string,
}
audit_log(
Actions.FORM_DATA_VIEWED, request.user, owner,
_("Requested data view for '%(id_string)s'.") %
{
'id_string': xform.id_string,
}, audit, request)
return render_to_response("data_view.html", context_instance=context)
def attachment_url(request, size='medium'):
media_file = request.GET.get('media_file')
# TODO: how to make sure we have the right media file,
# this assumes duplicates are the same file
result = Attachment.objects.filter(media_file=media_file)[0:1]
if result.count() == 0:
return HttpResponseNotFound(_(u'Attachment not found'))
attachment = result[0]
if not attachment.mimetype.startswith('image'):
return redirect(attachment.media_file.url)
try:
media_url = image_url(attachment, size)
except:
# TODO: log this somewhere
# image not found, 404, S3ResponseError timeouts
pass
else:
if media_url:
return redirect(media_url)
return HttpResponseNotFound(_(u'Error: Attachment not found'))
def instance(request, username, id_string):
xform, is_owner, can_edit, can_view = get_xform_and_perms(
username, id_string, request)
# no access
if not (xform.shared_data or can_view or
request.session.get('public_link') == xform.uuid):
return HttpResponseForbidden(_(u'Not shared.'))
context = RequestContext(request)
audit = {
"xform": xform.id_string,
}
audit_log(
Actions.FORM_DATA_VIEWED, request.user, xform.user,
_("Requested instance view for '%(id_string)s'.") %
{
'id_string': xform.id_string,
}, audit, request)
return render_to_response('instance.html', {
'username': username,
'id_string': id_string,
'xform': xform,
'can_edit': can_edit
}, context_instance=context)
| SEL-Columbia/formhub | odk_viewer/views.py | Python | bsd-2-clause | 27,537 |
# -*- coding: utf-8 -*-
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file LICENSE.txt. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
from mediainfodll import MediaInfo, Stream
import xbmc
import xbmcaddon
__addon__ = xbmcaddon.Addon()
__cwd__ = __addon__.getAddonInfo('path')
__scriptname__ = __addon__.getAddonInfo('name')
def info(msg):
xbmc.log("### [%s] - %s" % (__scriptname__, msg,), level=xbmc.LOGNOTICE)
class Media(object):
def __init__(self):
self.mi = MediaInfo()
def getInfos(self, mfile):
nfile = self.smbToUNC(mfile)
self.mi.Open(nfile)
width = self.mi.Get(Stream.Video, 0, "Width")
height = self.mi.Get(Stream.Video, 0, "Height")
ratio = self.mi.Get(Stream.Video, 0, "PixelAspectRatio")
dar = self.mi.Get(Stream.Video, 0, "DisplayAspectRatio")
fps = self.mi.Get(Stream.Video, 0, "FrameRate")
self.mi.Close()
try:
width = int(float(width))
height = int(float(height))
except:
width = int(0)
height = int(0)
try:
dar = float(dar)
except:
dar = float(0)
try:
fps = float(fps)
except:
fps = float(0)
return [width, height, 1, dar, fps]
def smbToUNC(self, smbFile):
testFile = smbFile[0:3]
newFile = ""
if testFile == "smb":
for i in xrange(0, len(smbFile)):
if smbFile[i] == "/":
newFile += "\\"
else:
newFile = newFile + smbFile[i]
retFile = newFile[4:]
else:
retFile = smbFile
return retFile
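# Hypothetical usage sketch (path and unpacked names are illustrative only):
#   m = Media()
#   width, height, par, dar, fps = m.getInfos("smb://server/share/video.mkv")
# smbToUNC() first rewrites the smb:// form into a UNC path, e.g.
#   "smb://server/share/video.mkv" -> "\\server\share\video.mkv"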
| AmbiBox/kodi.script.ambibox | resources/lib/media.py | Python | gpl-2.0 | 2,410 |
# coding: utf-8
from mongomock import MongoClient as MockMongoClient
from .base import *
# For tests, don't use KoBoCAT's DB
DATABASES = {
'default': dj_database_url.config(default='sqlite:///%s/db.sqlite3' % BASE_DIR),
}
DATABASE_ROUTERS = ['kpi.db_routers.TestingDatabaseRouter']
TESTING = True
# Decrease prod value to speed-up tests
SUBMISSION_LIST_LIMIT = 100
ENV = 'testing'
# Run all Celery tasks synchronously during testing
CELERY_TASK_ALWAYS_EAGER = True
MONGO_CONNECTION_URL = 'mongodb://fakehost/formhub_test'
MONGO_CONNECTION = MockMongoClient(
MONGO_CONNECTION_URL, j=True, tz_aware=True)
MONGO_DB = MONGO_CONNECTION['formhub_test']
| kobotoolbox/kpi | kobo/settings/testing.py | Python | agpl-3.0 | 664 |
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2015 Digi International Inc., All Rights Reserved.
#
import os
import dj_database_url
import binascii
import random
import sys
# Detect if we're in unit test mode
TESTING = 'test' in sys.argv
# Detect if we're running on Heroku
ON_HEROKU = 'DYNO' in os.environ and os.environ.get('HOME', "").startswith("/app")
# Django settings for xbeewifiapp project.
# By default, turn DEBUG off when running on Heroku
DEBUG = bool(os.environ.get('DJANGO_DEBUG', (not ON_HEROKU)))
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {}
if not ON_HEROKU:
DATABASES['default'] = {
# Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'local.sqlite'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
# Empty for localhost through domain sockets or '127.0.0.1' for
# localhost through TCP.
'HOST': '',
'PORT': '', # Set to empty string for default.
}
else:
# Parse database configuration from $DATABASE_URL
DATABASES['default'] = dj_database_url.config()
# Since the database is mostly reads (writes only performed for updating
# dashboards, session data and user info), setting autocommit will keep the
# overhead down.
DATABASES['default']['OPTIONS'] = {
'autocommit': True
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = 'staticfiles'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static'),
)
frontend_files_path = os.path.join(
os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'prod'),
'static')
if os.path.exists(frontend_files_path):
STATICFILES_DIRS = STATICFILES_DIRS + (frontend_files_path, )
frontend_files_path_dev = os.path.join(
os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'build'),
'static')
if os.path.exists(frontend_files_path_dev):
STATICFILES_DIRS = STATICFILES_DIRS + (frontend_files_path_dev, )
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
#'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'log_request_id.middleware.RequestIDMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
'xbeewifiapp.apps.dashboard.middleware.DisableCSRF',
'xbeewifiapp.apps.dashboard.middleware.NoCacheApiMiddleware'
)
# If we're on Heroku (or not in unit test mode), force SSL. Needs to be first.
if ON_HEROKU or not TESTING:
force_ssl = ('sslify.middleware.SSLifyMiddleware',)
MIDDLEWARE_CLASSES = force_ssl + MIDDLEWARE_CLASSES
ROOT_URLCONF = 'xbeewifiapp.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'xbeewifiapp.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or
# "C:/www/django/templates". Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'templates'),
)
if os.path.exists(frontend_files_path):
TEMPLATE_DIRS = TEMPLATE_DIRS + (frontend_files_path, )
if os.path.exists(frontend_files_path_dev):
TEMPLATE_DIRS = TEMPLATE_DIRS + (frontend_files_path_dev, )
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'corsheaders',
'rest_framework',
'xbeewifiapp.apps.dashboard',
'xbeewifiapp.libs.digi',
)
# logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(levelname)s: %(message)s'
},
'verbose': {
'format': '%(levelname)s [thread: %(thread)d, %(module)s:%(funcName)s, request: %(request_id)s]: %(message)s'
}
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'request_id': {
'()': 'log_request_id.filters.RequestIDFilter'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
# DEBUG: Turn on verbose logging
#'filters': ['request_id'],
#'formatter': 'verbose'
},
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'xbeewifiapp': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
}
# Disable output for unit test mode
if TESTING:
LOGGING['loggers']['xbeewifiapp']['handlers'] = ['null']
VERBOSE_LOGGING = bool(os.environ.get('VERBOSE_LOGGING', False))
if VERBOSE_LOGGING:
# DEBUG: Log all requests
LOG_REQUESTS=True
LOGGING['handlers']['console']['formatter'] = 'verbose'
LOGGING['handlers']['console']['filters'] = ['request_id']
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
REST_FRAMEWORK = {
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.HyperlinkedModelSerializer',
# Global default of read only unless authenticated. May be overridden or
# tightened further by individual views, but global default needed for
# api_root
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated'
],
# Only allow api access to authenticated clients
# Note: Order matters here. Listing session auth first will return 403's
# for # Unauthenticated requests, vs 401 (which may lead to browser popup)
# if basic auth
'DEFAULT_AUTHENTICATION_CLASSES': (
# Normal application flow and API browser will want to use session auth
'rest_framework.authentication.SessionAuthentication',
# Basic auth used primarily for standalone API testing
'rest_framework.authentication.BasicAuthentication',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.FormParser',
),
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
LIB_DIGI_DEVICECLOUD = {
    # Use '#' as delimiter to parse username/cloud combo strings
'USERNAME_CLOUD_DELIMETER': '#',
    # Set a default cloud FQDN if not provided
'DEFAULT_CLOUD_SERVER': 'login.etherios.com',
}
# Custom authentication backend for Device Cloud
AUTHENTICATION_BACKENDS = ('xbeewifiapp.libs.digi.auth.DeviceCloudBackend',)
# Custom Django user model
AUTH_USER_MODEL = 'digi.DeviceCloudUser'
# Redirect path for 'login_required'
LOGIN_URL = '/#/login'
# Redirect path for logouts
LOGOUT_URL = 'logout'
# Redirect after login if 'next' not specified
LOGIN_REDIRECT_URL = 'dashboard'
# Username/Password used for Device Cloud http monitors
SECRET_DEVICE_CLOUD_MONITOR_AUTH_USER = \
os.environ.get('DEVICE_CLOUD_MONITOR_AUTH_USER', "change")
SECRET_DEVICE_CLOUD_MONITOR_AUTH_PASS = \
os.environ.get('DEVICE_CLOUD_MONITOR_AUTH_PASS', "me")
# Supported Device Types (dpDeviceType) visible to frontend.
# Will be used to filter Device Cloud queries
SUPPORTED_DEVICE_TYPES = [
'XBee WiFi S6B TH', 'XBee WiFi S6B SMT']
# Django Secret Key
try:
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
except KeyError:
# Generate a new key for them using same method as
# django-admin startproject
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
SECRET_KEY = ''.join([random.choice(chars) for i in range(50)])
os.environ['DJANGO_SECRET_KEY'] = SECRET_KEY
# Crypto key used for encryption
try:
SECRET_CRYPTO_KEY = binascii.a2b_hex(os.environ['AES_CRYPTO_KEY_HEX'])
except KeyError:
    # Generate a new key: 16 bytes, stored in the environment as a 32-char
    # hex string, but kept here as raw bytes (matching the try branch above)
    _key_hex = binascii.b2a_hex(os.urandom(16))
    os.environ['AES_CRYPTO_KEY_HEX'] = _key_hex
    SECRET_CRYPTO_KEY = binascii.a2b_hex(_key_hex)
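# For reference: AES_CRYPTO_KEY_HEX is expected to hold a 32-character hex
# string (e.g. "00112233445566778899aabbccddeeff", purely illustrative);
# binascii.a2b_hex() turns it into the 16 raw bytes used as the AES key.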
# Tell Django to use the JUXD Test Suite Runner
TEST_RUNNER = 'juxd.JUXDTestSuiteRunner'
# Where to write the output
JUXD_FILENAME = os.path.join(
os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'test-reports'),
'backend.xml')
| brucetsao/xbeewificloudkit | xbeewifiapp/settings.py | Python | mpl-2.0 | 12,120 |
# force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("../../../../../../../")
import GeneralUtil.python.PlotUtilities as pPlotUtil
import GeneralUtil.python.GenUtilities as pGenUtil
import matplotlib.gridspec as gridspec
import copy
def ReadNaCl(BaseFolder):
"""
See: ReadKCl, except with NaCl data instead
"""
files = sorted(pGenUtil.getAllFiles(BaseFolder,ext=".dat"))
data = []
for f in files:
# only care about third column ('C')
TmpColumn = np.loadtxt(f,skiprows=52)[:,2]
data.append(TmpColumn)
return data
def ReadKCl(BaseFolder):
"""
Args:
        BaseFolder: location of the KCl .csv files
    Returns:
        list, each element is the column of FRET values from all molecules.
"""
files = sorted(pGenUtil.getAllFiles(BaseFolder,ext=".csv"))
data = []
for f in files:
data.append(np.loadtxt(f))
return data
def AxesDefault(ylim,title,ylabel,UseYTicks):
"""
Default options for stacking plots, for the current plot
Args:
ylim: limits for the y
title: for the subplot
ylabel: for the y axis
USeYTicks: if true, douesnt hide the y tickso r numbers
"""
ax = plt.gca()
plt.setp(ax.get_xticklabels(),visible=False)
if (not UseYTicks):
plt.setp(ax.get_yticklabels(),visible=False)
plt.ylim(ylim)
pPlotUtil.lazyLabel("",ylabel,title,frameon=True)
def run():
"""
Generates FRET histograms from NaCl and KCl data
"""
data = ReadNaCl("./Data/NaCl")
dataKcl = ReadKCl("./Data/KCl")
# assume number of histograms is the same
NumHists = len(data)
fig = pPlotUtil.figure(figsize=(12,16))
CommonStyle = dict(alpha=0.3,linewidth=0)
StyleDicts = [dict(color='m',label="0mM NaCl",**CommonStyle),
dict(color='r',label="1mM NaCl",**CommonStyle),
dict(color='k',label="10mM NaCl",**CommonStyle),
dict(color='c',label="25mM NaCl",**CommonStyle),
dict(color='g',label="50mM NaCl",**CommonStyle),
dict(color='b',label="100mM NaCl",**CommonStyle)]
# for style, just replace label 'NaCl' with 'KCL'
StyleDictKCL = copy.deepcopy(StyleDicts)
for i,TmpDict in enumerate(StyleDictKCL):
TmpDict['label'] = TmpDict['label'].replace("NaCl","KCl")
TmpDict['alpha'] = 0.7
TmpDict['linewidth'] = 0.5
# determine the bounds for the FRET
MinFret = -0.25
MaxFretFromData = np.max([max(arr) for arr in data])
MaxFret = 1.2
# assume a-priori knowledge of the bin counts
MinBin = 0
MaxBin = 140
StepFret = 0.01
ylim = [MinBin,MaxBin]
bins = np.arange(MinFret,MaxFret,StepFret)
for i,histogram in enumerate(data):
title = "High salt induces GQ folding" \
if i == 0 else ""
# plot the NaCl data
plt.subplot(NumHists,2,(2*i+1))
plt.hist(histogram,bins=bins,**StyleDicts[i])
AxesDefault(ylim,title,"Count",UseYTicks=True)
plt.subplot(NumHists,2,2*(i+1))
plt.hist(dataKcl[i],bins=bins,**StyleDictKCL[i])
AxesDefault(ylim,"","",UseYTicks=False)
    # restore tick labels and axis labels on the bottom row of subplots
plt.subplot(NumHists,2,11)
ax = plt.gca()
plt.setp(ax.get_xticklabels(),visible=True)
plt.setp(ax.get_yticklabels(),visible=True)
pPlotUtil.lazyLabel("FRET","Count","",frameon=True)
plt.subplot(NumHists,2,12)
ax = plt.gca()
plt.setp(ax.get_xticklabels(),visible=True)
pPlotUtil.lazyLabel("FRET","","",frameon=True)
pPlotUtil.savefig(fig,"./out.png")
if __name__ == "__main__":
run()
| prheenan/Research | Perkins/Projects/Conferences/2016_7_CPLC/Day2_FRET_Dynamics/HistogramPlotting/MainHistogramAnalysis.py | Python | gpl-3.0 | 3,830 |
import logging
from itertools import count
from hashlib import sha1
from sqlalchemy.sql import and_, expression
from sqlalchemy.schema import Column, Index
from sqlalchemy import alias
from dataset.persistence.util import guess_type, normalize_column_name
from dataset.persistence.util import ResultIter
from dataset.util import DatasetException
log = logging.getLogger(__name__)
class Table(object):
def __init__(self, database, table):
self.indexes = dict((i.name, i) for i in table.indexes)
self.database = database
self.table = table
self._is_dropped = False
@property
def columns(self):
"""
Get a listing of all columns that exist in the table.
"""
return list(self.table.columns.keys())
@property
def _normalized_columns(self):
return map(normalize_column_name, self.columns)
def drop(self):
"""
Drop the table from the database, deleting both the schema
and all the contents within it.
Note: the object will raise an Exception if you use it after
dropping the table. If you want to re-create the table, make
sure to get a fresh instance from the :py:class:`Database <dataset.Database>`.
"""
self.database._acquire()
self._is_dropped = True
self.database._tables.pop(self.table.name, None)
self.table.drop(self.database.engine)
self.database._release()
def _check_dropped(self):
if self._is_dropped:
raise DatasetException('the table has been dropped. this object should not be used again.')
def insert(self, row, ensure=True, types={}):
"""
Add a row (type: dict) by inserting it into the table.
        If ``ensure`` is set and any of the keys of the row are not
        table columns, they will be created automatically.
During column creation, ``types`` will be checked for a key
matching the name of a column to be created, and the given
SQLAlchemy column type will be used. Otherwise, the type is
guessed from the row value, defaulting to a simple unicode
field.
::
data = dict(title='I am a banana!')
table.insert(data)
Returns the inserted row's primary key.
"""
self._check_dropped()
if ensure:
self._ensure_columns(row, types=types)
res = self.database.executable.execute(self.table.insert(row))
if len(res.inserted_primary_key) > 0:
return res.inserted_primary_key[0]
def insert_many(self, rows, chunk_size=1000, ensure=True, types={}):
"""
Add many rows at a time, which is significantly faster than adding
        them one by one. By default the rows are processed in chunks of
1000 per commit, unless you specify a different ``chunk_size``.
See :py:meth:`insert() <dataset.Table.insert>` for details on
the other parameters.
::
rows = [dict(name='Dolly')] * 10000
table.insert_many(rows)
"""
def _process_chunk(chunk):
if ensure:
for row in chunk:
self._ensure_columns(row, types=types)
self.table.insert().execute(chunk)
self._check_dropped()
chunk = []
for i, row in enumerate(rows, start=1):
chunk.append(row)
if i % chunk_size == 0:
_process_chunk(chunk)
chunk = []
if chunk:
_process_chunk(chunk)
def update(self, row, keys, ensure=True, types={}):
"""
Update a row in the table. The update is managed via
the set of column names stated in ``keys``: they will be
used as filters for the data to be updated, using the values
in ``row``.
::
# update all entries with id matching 10, setting their title columns
data = dict(id=10, title='I am a banana!')
table.update(data, ['id'])
If keys in ``row`` update columns not present in the table,
they will be created based on the settings of ``ensure`` and
``types``, matching the behavior of :py:meth:`insert() <dataset.Table.insert>`.
"""
# check whether keys arg is a string and format as a list
if not isinstance(keys, (list, tuple)):
keys = [keys]
self._check_dropped()
if not keys or len(keys) == len(row):
return False
clause = [(u, row.get(u)) for u in keys]
if ensure:
self._ensure_columns(row, types=types)
# Don't update the key itself, so remove any keys from the row dict
clean_row = row.copy()
for key in keys:
if key in clean_row.keys():
del clean_row[key]
try:
filters = self._args_to_clause(dict(clause))
stmt = self.table.update(filters, clean_row)
rp = self.database.executable.execute(stmt)
return rp.rowcount
except KeyError:
return 0
def upsert(self, row, keys, ensure=True, types={}):
"""
An UPSERT is a smart combination of insert and update. If rows with matching ``keys`` exist
they will be updated, otherwise a new row is inserted in the table.
::
data = dict(id=10, title='I am a banana!')
table.upsert(data, ['id'])
"""
# check whether keys arg is a string and format as a list
if not isinstance(keys, (list, tuple)):
keys = [keys]
self._check_dropped()
if ensure:
self.create_index(keys)
filters = {}
for key in keys:
filters[key] = row.get(key)
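        # look up an existing row using the key columns as an equality filter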
res = self.find_one(**filters)
if res is not None:
row_count = self.update(row, keys, ensure=ensure, types=types)
if row_count == 0:
return False
elif row_count == 1:
try:
return res['id']
except KeyError:
return True
else:
return True
else:
return self.insert(row, ensure=ensure, types=types)
def delete(self, **_filter):
""" Delete rows from the table. Keyword arguments can be used
to add column-based filters. The filter criterion will always
be equality:
.. code-block:: python
table.delete(place='Berlin')
If no arguments are given, all records are deleted.
"""
self._check_dropped()
if _filter:
q = self._args_to_clause(_filter)
stmt = self.table.delete(q)
else:
stmt = self.table.delete()
rows = self.database.executable.execute(stmt)
return rows.rowcount > 0
def _ensure_columns(self, row, types={}):
# Keep order of inserted columns
for column in row.keys():
if normalize_column_name(column) in self._normalized_columns:
continue
if column in types:
_type = types[column]
else:
_type = guess_type(row[column])
log.debug("Creating column: %s (%s) on %r" % (column,
_type, self.table.name))
self.create_column(column, _type)
def _args_to_clause(self, args):
self._ensure_columns(args)
clauses = []
for k, v in args.items():
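            # sequences expand to SQL IN (...) clauses; scalars to simple equality tests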
if isinstance(v, (list, tuple)):
clauses.append(self.table.c[k].in_(v))
else:
clauses.append(self.table.c[k] == v)
return and_(*clauses)
def create_column(self, name, type):
"""
        Explicitly create a new column ``name`` of a specified type.
``type`` must be a `SQLAlchemy column type <http://docs.sqlalchemy.org/en/rel_0_8/core/types.html>`_.
::
table.create_column('created_at', sqlalchemy.DateTime)
"""
self._check_dropped()
self.database._acquire()
try:
if normalize_column_name(name) not in self._normalized_columns:
self.database.op.add_column(
self.table.name,
Column(name, type),
self.table.schema
)
self.table = self.database.update_table(self.table.name)
finally:
self.database._release()
def drop_column(self, name):
"""
Drop the column ``name``
::
table.drop_column('created_at')
"""
self._check_dropped()
self.database._acquire()
try:
if name in self.table.columns.keys():
self.database.op.drop_column(
self.table.name,
name
)
self.table = self.database.update_table(self.table.name)
finally:
self.database._release()
def create_index(self, columns, name=None):
"""
Create an index to speed up queries on a table. If no ``name`` is given a random name is created.
::
table.create_index(['name', 'country'])
"""
self._check_dropped()
if not name:
sig = '||'.join(columns)
# This is a work-around for a bug in <=0.6.1 which would create
# indexes based on hash() rather than a proper hash.
key = abs(hash(sig))
name = 'ix_%s_%s' % (self.table.name, key)
if name in self.indexes:
return self.indexes[name]
key = sha1(sig.encode('utf-8')).hexdigest()[:16]
name = 'ix_%s_%s' % (self.table.name, key)
if name in self.indexes:
return self.indexes[name]
try:
self.database._acquire()
columns = [self.table.c[c] for c in columns]
idx = Index(name, *columns)
idx.create(self.database.engine)
except:
idx = None
finally:
self.database._release()
self.indexes[name] = idx
return idx
def find_one(self, **kwargs):
"""
Works just like :py:meth:`find() <dataset.Table.find>` but returns one result, or None.
::
row = table.find_one(country='United States')
"""
kwargs['_limit'] = 1
iterator = self.find(**kwargs)
try:
return next(iterator)
except StopIteration:
return None
def _args_to_order_by(self, order_by):
if order_by[0] == '-':
return self.table.c[order_by[1:]].desc()
else:
return self.table.c[order_by].asc()
def find(self, _limit=None, _offset=0, _step=5000,
order_by='id', return_count=False, **_filter):
"""
Performs a simple search on the table. Simply pass keyword arguments as ``filter``.
::
results = table.find(country='France')
results = table.find(country='France', year=1980)
Using ``_limit``::
# just return the first 10 rows
results = table.find(country='France', _limit=10)
You can sort the results by single or multiple columns. Append a minus sign
to the column name for descending order::
# sort results by a column 'year'
results = table.find(country='France', order_by='year')
# return all rows sorted by multiple columns (by year in descending order)
results = table.find(order_by=['country', '-year'])
By default :py:meth:`find() <dataset.Table.find>` will break the
query into chunks of ``_step`` rows to prevent huge tables
from being loaded into memory at once.
For more complex queries, please use :py:meth:`db.query() <dataset.Database.query>`
instead."""
self._check_dropped()
if not isinstance(order_by, (list, tuple)):
order_by = [order_by]
order_by = [o for o in order_by if (o.startswith('-') and o[1:] or o) in self.table.columns]
order_by = [self._args_to_order_by(o) for o in order_by]
args = self._args_to_clause(_filter)
# query total number of rows first
count_query = alias(self.table.select(whereclause=args, limit=_limit, offset=_offset),
name='count_query_alias').count()
rp = self.database.executable.execute(count_query)
total_row_count = rp.fetchone()[0]
if return_count:
return total_row_count
if _limit is None:
_limit = total_row_count
if _step is None or _step is False or _step == 0:
_step = total_row_count
if total_row_count > _step and not order_by:
_step = total_row_count
log.warn("query cannot be broken into smaller sections because it is unordered")
queries = []
for i in count():
qoffset = _offset + (_step * i)
qlimit = min(_limit - (_step * i), _step)
if qlimit <= 0:
break
queries.append(self.table.select(whereclause=args, limit=qlimit,
offset=qoffset, order_by=order_by))
return ResultIter((self.database.executable.execute(q) for q in queries),
row_type=self.database.row_type)
def count(self, **_filter):
"""
Return the count of results for the given filter set (same filter options as with ``find()``).
"""
return self.find(return_count=True, **_filter)
def __len__(self):
"""
Returns the number of rows in the table.
"""
return self.count()
def distinct(self, *columns, **_filter):
"""
        Returns all rows of a table, but removes rows with duplicate values in ``columns``.
        Internally this creates a `DISTINCT statement <http://www.w3schools.com/sql/sql_distinct.asp>`_.
::
# returns only one row per year, ignoring the rest
table.distinct('year')
# works with multiple columns, too
table.distinct('year', 'country')
# you can also combine this with a filter
table.distinct('year', country='China')
"""
self._check_dropped()
qargs = []
try:
columns = [self.table.c[c] for c in columns]
for col, val in _filter.items():
qargs.append(self.table.c[col] == val)
except KeyError:
return []
q = expression.select(columns, distinct=True,
whereclause=and_(*qargs),
order_by=[c.asc() for c in columns])
return self.database.query(q)
def __getitem__(self, item):
""" This is an alias for distinct which allows the table to be queried as using
square bracket syntax.
::
# Same as distinct:
print list(table['year'])
"""
if not isinstance(item, tuple):
item = item,
return self.distinct(*item)
def all(self):
"""
Returns all rows of the table as simple dictionaries. This is simply a shortcut
to *find()* called with no arguments.
::
rows = table.all()"""
return self.find()
def __iter__(self):
"""
        Allows for iterating over all rows in the table without explicitly
calling :py:meth:`all() <dataset.Table.all>`.
::
for row in table:
print(row)
"""
return self.all()
def __repr__(self):
return '<Table(%s)>' % self.table.name
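# Minimal usage sketch (added for illustration; not part of the library itself).
# It assumes the standard public entry point dataset.connect() and an in-memory
# SQLite URL are available, and exercises the Table methods documented above.
if __name__ == '__main__':
    import dataset
    db = dataset.connect('sqlite:///:memory:')
    people = db['people']
    pk = people.insert(dict(name='Dolly', place='Berlin'))    # returns the primary key
    people.upsert(dict(id=pk, name='Dolly', place='Hamburg'), ['id'])
    print(people.find_one(name='Dolly'))                      # a single row dict, or None
    print(len(people))                                        # row count via __len__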
| askebos/dataset | dataset/persistence/table.py | Python | mit | 15,871 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
from abc import abstractmethod
from twitter.common.collections import OrderedSet
from pants.backend.core.tasks.task import Task
from pants.base.address import SyntheticAddress
from pants.base.address_lookup_error import AddressLookupError
from pants.base.build_environment import get_buildroot
from pants.base.build_graph import sort_targets
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.util.dirutil import safe_rmtree, safe_walk
from pants.util.memo import memoized_property
from pants.util.meta import AbstractClass
logger = logging.getLogger(__name__)
class SimpleCodegenTask(Task):
"""A base-class for code generation for a single target language."""
@classmethod
def product_types(cls):
# NB(gmalmquist): This is a hack copied from the old CodeGen base class to get the round manager
# to properly run codegen before resolve and compile. It would be more correct to just have each
# individual codegen class declare what languages it generates, but would cause problems with
# scala. See https://rbcommons.com/s/twitter/r/2540/.
return ['java', 'scala', 'python']
@classmethod
def register_options(cls, register):
super(SimpleCodegenTask, cls).register_options(register)
register('--allow-empty', action='store_true', default=True, fingerprint=True,
help='Skip targets with no sources defined.',
advanced=True)
strategy_names = [strategy.name() for strategy in cls.supported_strategy_types()]
if cls.forced_codegen_strategy() is None:
register('--strategy', choices=strategy_names, fingerprint=True,
default=strategy_names[0],
help='Selects the compilation strategy to use. The "global" strategy uses a shared '
'global directory for all generated code, and the "isolated" strategy uses '
'per-target codegen directories.',
advanced=True)
if 'isolated' in strategy_names:
register('--allow-dups', action='store_true', default=False, fingerprint=True,
help='Allow multiple targets specifying the same sources when using the isolated '
'strategy. If duplicates are allowed, the logic of find_sources in '
'IsolatedCodegenStrategy will associate generated sources with '
'least-dependent targets that generate them.',
advanced=True)
@classmethod
def get_fingerprint_strategy(cls):
"""Override this method to use a fingerprint strategy other than the default one.
:return: a fingerprint strategy, or None to use the default strategy.
"""
return None
def synthetic_target_extra_dependencies(self, target):
"""Gets any extra dependencies generated synthetic targets should have.
This method is optional for subclasses to implement, because some code generators may have no
extra dependencies.
:param Target target: the Target from which we are generating a synthetic Target. E.g., 'target'
might be a JavaProtobufLibrary, whose corresponding synthetic Target would be a JavaLibrary.
It may not be necessary to use this parameter depending on the details of the subclass.
:return: a list of dependencies.
"""
return []
@property
def synthetic_target_type(self):
"""The type of target this codegen task generates.
For example, the target type for JaxbGen would simply be JavaLibrary.
:return: a type (class) that inherits from Target.
"""
raise NotImplementedError
def is_gentarget(self, target):
"""Predicate which determines whether the target in question is relevant to this codegen task.
E.g., the JaxbGen task considers JaxbLibrary targets to be relevant, and nothing else.
:param Target target: The target to check.
:return: True if this class can generate code for the given target, False otherwise.
"""
raise NotImplementedError
def execute_codegen(self, invalid_targets):
"""Generated code for the given list of targets.
:param invalid_targets: an iterable of targets (a subset of codegen_targets()).
"""
raise NotImplementedError
def codegen_targets(self):
"""Finds codegen targets in the dependency graph.
:return: an iterable of dependency targets.
"""
return self.context.targets(self.is_gentarget)
@classmethod
def supported_strategy_types(cls):
"""The CodegenStrategy subclasses that this codegen task supports.
This list is used to generate the options for the --strategy flag. The first item in the list
is used as the default value.
By default, this only supports the IsolatedCodegenStrategy. Subclasses which desire global
generation should subclass the GlobalCodegenStrategy.
:return: the list of types (classes, not instances) that extend from CodegenStrategy.
:rtype: list
"""
return [cls.IsolatedCodegenStrategy]
@classmethod
def forced_codegen_strategy(cls):
"""If only a single codegen strategy is supported, returns its name.
    The return value is computed automatically from supported_strategy_types().
:return: the forced code generation strategy, or None if multiple options are supported.
"""
strategy_types = cls.supported_strategy_types()
if not strategy_types:
raise TaskError("{} doesn't support any codegen strategies.".format(cls.__name__))
if len(strategy_types) == 1:
return strategy_types[0].name()
return None
@classmethod
def _codegen_strategy_map(cls):
"""Returns a dict which maps names to codegen strategy types.
This is generated from the supported_strategy_types list.
"""
return {strategy.name(): strategy for strategy in cls.supported_strategy_types()}
def _codegen_strategy_for_name(self, name):
strategy_type_map = self._codegen_strategy_map()
if name not in strategy_type_map:
raise self.UnsupportedStrategyError('Unsupported codegen strategy "{}".'.format(name))
return strategy_type_map[name](self)
@memoized_property
def codegen_strategy(self):
"""Returns the codegen strategy object used by this codegen.
This is controlled first by the forced_codegen_strategy method, then by user-specified
options if the former returns None.
If you just want the name ('global' or 'isolated') of the strategy, use codegen_strategy.name().
:return: the codegen strategy object.
:rtype: SimpleCodegenTask.CodegenStrategy
"""
strategy = self.forced_codegen_strategy()
if strategy is None:
strategy = self.get_options().strategy
return self._codegen_strategy_for_name(strategy)
def codegen_workdir(self, target):
"""The path to the directory code should be generated in.
E.g., this might be something like /home/user/repo/.pants.d/gen/jaxb/...
Generally, subclasses should not need to override this method. If they do, it is crucial that
the implementation is /deterministic/ -- that is, the return value of this method should always
be the same for the same input target.
:param Target target: the codegen target (e.g., a java_protobuf_library).
:return: The absolute file path.
"""
return os.path.join(self.workdir, self.codegen_strategy.codegen_workdir_suffix(target))
def validate_sources_present(self, sources, targets):
"""Checks whether sources is empty, and either raises a TaskError or just returns False.
The specifics of this behavior are defined by whether the user sets --allow-empty to True/False:
--allow-empty=False will result in a TaskError being raised in the event of an empty source
set. If --allow-empty=True, this method will just return false and log a warning.
Shared for all SimpleCodegenTask subclasses to help keep errors consistent and descriptive.
:param sources: the sources from the given targets.
:param targets: the targets the sources are from, included just for error message generation.
:return: True if sources is not empty, False otherwise.
"""
if not sources:
formatted_targets = '\n'.join([t.address.spec for t in targets])
message = ('Had {count} targets but no sources?\n targets={targets}'
.format(count=len(targets), targets=formatted_targets))
if not self.get_options().allow_empty:
raise TaskError(message)
else:
logging.warn(message)
return False
return True
def execute(self):
targets = self.codegen_targets()
with self.invalidated(targets,
invalidate_dependents=True,
fingerprint_strategy=self.get_fingerprint_strategy()) as invalidation_check:
invalid_targets = OrderedSet()
for vts in invalidation_check.invalid_vts:
invalid_targets.update(vts.targets)
self.codegen_strategy.execute_codegen(invalid_targets)
invalid_vts_by_target = dict([(vt.target, vt) for vt in invalidation_check.invalid_vts])
vts_artifactfiles_pairs = []
for target in targets:
target_workdir = self.codegen_workdir(target)
synthetic_name = target.id
sources_rel_path = os.path.relpath(target_workdir, get_buildroot())
synthetic_address = SyntheticAddress(sources_rel_path, synthetic_name)
raw_generated_sources = list(self.codegen_strategy.find_sources(target))
# Make the sources robust regardless of whether subclasses return relative paths, or
# absolute paths that are subclasses of the workdir.
generated_sources = [src if src.startswith(target_workdir)
else os.path.join(target_workdir, src)
for src in raw_generated_sources]
relative_generated_sources = [os.path.relpath(src, target_workdir)
for src in generated_sources]
self.target = self.context.add_new_target(
address=synthetic_address,
target_type=self.synthetic_target_type,
dependencies=self.synthetic_target_extra_dependencies(target),
sources_rel_path=sources_rel_path,
sources=relative_generated_sources,
derived_from=target,
provides=target.provides,
)
synthetic_target = self.target
build_graph = self.context.build_graph
# NB(pl): This bypasses the convenience function (Target.inject_dependency) in order
# to improve performance. Note that we can walk the transitive dependee subgraph once
# for transitive invalidation rather than walking a smaller subgraph for every single
# dependency injected.
for dependent_address in build_graph.dependents_of(target.address):
build_graph.inject_dependency(
dependent=dependent_address,
dependency=synthetic_target.address,
)
# NB(pl): See the above comment. The same note applies.
for concrete_dependency_address in build_graph.dependencies_of(target.address):
build_graph.inject_dependency(
dependent=synthetic_target.address,
dependency=concrete_dependency_address,
)
build_graph.walk_transitive_dependee_graph(
build_graph.dependencies_of(target.address),
work=lambda t: t.mark_transitive_invalidation_hash_dirty(),
)
if target in self.context.target_roots:
self.context.target_roots.append(synthetic_target)
if target in invalid_vts_by_target:
vts_artifactfiles_pairs.append((invalid_vts_by_target[target], generated_sources))
if self.artifact_cache_writes_enabled():
self.update_artifact_cache(vts_artifactfiles_pairs)
def resolve_deps(self, unresolved_deps):
deps = OrderedSet()
for dep in unresolved_deps:
try:
deps.update(self.context.resolve(dep))
except AddressLookupError as e:
raise self.DepLookupError('{message}\n on dependency {dep}'.format(message=e, dep=dep))
return deps
class CodegenStrategy(AbstractClass):
"""Abstract strategies for running codegen.
Includes predicting generated sources, partitioning targets for execution, etc.
"""
@classmethod
def name(self):
"""The name of this strategy (eg, 'isolated').
This is used for generating the list of valid options for the --strategy flag.
"""
raise NotImplementedError
def _do_execute_codegen(self, targets):
"""Invokes the task's execute_codegen on the targets """
try:
self._task.execute_codegen(targets)
except Exception as ex:
for target in targets:
self._task.context.log.error('Failed to generate target: {}'.format(target.address.spec))
raise TaskError(ex)
@abstractmethod
def execute_codegen(self, targets):
"""Invokes _do_execute_codegen on the targets.
Subclasses decide how the targets are partitioned before being sent to the task's
execute_codegen method.
:targets: a set of targets.
"""
@abstractmethod
def find_sources(self, target):
"""Finds (or predicts) the sources generated by the given target."""
@abstractmethod
def codegen_workdir_suffix(self, target):
"""The working directory suffix for the given target's generated code."""
def __str__(self):
return self.name()
class GlobalCodegenStrategy(CodegenStrategy):
"""Code generation strategy which generates all code together, in base directory."""
def __init__(self, task):
self._task = task
@classmethod
def name(cls):
return 'global'
def execute_codegen(self, targets):
with self._task.context.new_workunit(name='execute', labels=[WorkUnitLabel.MULTITOOL]):
self._do_execute_codegen(targets)
@abstractmethod
def find_sources(self, target):
"""Predicts what sources the codegen target will generate.
The exact implementation of this is left to the GlobalCodegenStrategy subclass.
:param Target target: the target for which to find generated sources.
:return: a set of relative filepaths.
:rtype: OrderedSet
"""
def codegen_workdir_suffix(self, target):
return self.name()
class IsolatedCodegenStrategy(CodegenStrategy):
"""Code generate strategy which generates the code for each target separately.
Code is generated in a unique parent directory per target.
"""
def __init__(self, task):
self._task = task
# NOTE(gm): This memoization yields a ~10% performance increase on my machine.
self._generated_sources_cache = {}
@classmethod
def name(cls):
return 'isolated'
def execute_codegen(self, targets):
with self._task.context.new_workunit(name='execute', labels=[WorkUnitLabel.MULTITOOL]):
ordered = [target for target in reversed(sort_targets(targets)) if target in targets]
for target in ordered:
with self._task.context.new_workunit(name=target.address.spec):
# TODO(gm): add a test-case to ensure this is correctly eliminating stale generated code.
safe_rmtree(self._task.codegen_workdir(target))
self._do_execute_codegen([target])
def find_sources(self, target):
"""Determines what sources were generated by the target after the fact.
This is done by searching the directory where this target's code was generated. This is only
possible because each target has its own unique directory in this CodegenStrategy.
:param Target target: the target for which to find generated sources.
:return: a set of relative filepaths.
:rtype: OrderedSet
"""
return self._find_sources_strictly_generated_by_target(target)
def codegen_workdir_suffix(self, target):
return os.path.join(self.name(), target.id)
def _find_sources_generated_by_target(self, target):
if target.id in self._generated_sources_cache:
for source in self._generated_sources_cache[target.id]:
yield source
return
target_workdir = self._task.codegen_workdir(target)
if not os.path.exists(target_workdir):
return
for root, dirs, files in safe_walk(target_workdir):
for name in files:
yield os.path.join(root, name)
def _find_sources_generated_by_dependencies(self, target):
sources = OrderedSet()
def add_sources(dep):
if dep is not target:
dep_sources = self._find_sources_generated_by_target(dep)
dep_sources = [self._relative_source(dep, source) for source in dep_sources]
sources.update(dep_sources)
target.walk(add_sources)
return sources
def _relative_source(self, target, source):
return os.path.relpath(source, self._task.codegen_workdir(target))
def _find_sources_strictly_generated_by_target(self, target):
# NB(gm): Some code generators may re-generate code that their dependent libraries generate.
# This results in targets claiming to generate sources that they really don't, so we try to
# filter out sources that were actually generated by dependencies of the target. This causes
# the code generated by the dependencies to 'win' over the code generated by dependees. By
# default, this behavior is disabled, and duplication in generated sources will raise a
# TaskError. This is controlled by the --allow-dups flag.
if target.id in self._generated_sources_cache:
return self._generated_sources_cache[target.id]
by_target = OrderedSet(self._find_sources_generated_by_target(target))
by_dependencies = self._find_sources_generated_by_dependencies(target)
strict = [t for t in by_target if self._relative_source(target, t) not in by_dependencies]
if len(strict) != len(by_target):
messages = ['{target} generated sources that had already been generated by dependencies.'
.format(target=target.address.spec)]
# Doing some extra work for the sake of helpful error messages.
duplicate_sources = set([self._relative_source(target, source)
for source in sorted(set(by_target) - set(strict))])
duplicates_by_targets = {}
def record_duplicates(dep):
if dep == target:
return
sources = [self._relative_source(dep, s)
for s in self._find_sources_generated_by_target(dep)]
sources = [s for s in sources if s in duplicate_sources]
if sources:
duplicates_by_targets[dep] = sources
target.walk(record_duplicates)
for dependency in sorted(duplicates_by_targets, key=lambda t: t.address.spec):
messages.append('\t{} also generated:'.format(dependency.address.spec))
messages.extend(['\t\t{}'.format(source) for source in duplicates_by_targets[dependency]])
message = '\n'.join(messages)
if self._task.get_options().allow_dups:
logger.warn(message)
else:
raise self.DuplicateSourceError(message)
self._generated_sources_cache[target.id] = strict
return strict
class DuplicateSourceError(TaskError):
"""A target generated the same code that was generated by one of its dependencies.
This is only thrown when --allow-dups=False.
"""
class UnsupportedStrategyError(TaskError):
"""Generated when there is no strategy for a given name."""
| areitz/pants | src/python/pants/backend/codegen/tasks/simple_codegen_task.py | Python | apache-2.0 | 19,854 |
class Car(object):
def __init__(self, name='General', model='GM', vehicle_type=None):
self.speed = 0
self.name = name
self.model = model
self.vehicle_type = vehicle_type
if self.name in ['Porshe', 'Koenigsegg']:
self.num_of_doors = 2
else:
self.num_of_doors = 4
if self.vehicle_type == 'trailer':
self.num_of_wheels = 8
else:
self.num_of_wheels = 4
    def is_saloon(self):
        # use != for value comparison ('is not' checks identity, not equality)
        if self.vehicle_type != 'trailer':
            # assignment, not comparison: mark the vehicle as a saloon
            self.vehicle_type = 'saloon'
            return True
        return False
    def drive(self, moving_speed):
        # set the speed on this instance rather than on the Car class,
        # so the attribute initialised in __init__ is the one updated
        if moving_speed == 3:
            self.speed = 1000
        elif moving_speed == 7:
            self.speed = 77
        return self
man = Car('MAN', 'Truck', 'trailer')
koenigsegg = Car('Koenigsegg', 'Agera R')
[man.num_of_wheels, koenigsegg.num_of_wheels]
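# Brief usage sketch (illustrative addition, not part of the original lab file):
print(man.is_saloon())            # False -- a trailer is not a saloon
print(koenigsegg.num_of_doors)    # 2 -- Koenigsegg is in the two-door list
print(koenigsegg.drive(7).speed)  # 77 -- drive() returns self, so calls can chain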
| anthonyndunguwanja/Anthony-Ndungu-bootcamp-17 | Day 2/Car_Class_Lab.py | Python | mit | 825 |