repo_name (string, 5-92) | path (string, 4-232) | copies (19 classes) | size (string, 4-7) | content (string, 721-1.04M) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
joelsmith/openshift-tools | ansible/roles/lib_git/library/git_merge.py | 1 | 14419 |
#!/usr/bin/env python
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
"""Run an ssh agent and set SSH_AUTH_SOCK so that clients will use it
Example:
with ssh_agent.SshAgent() as agent:
agent.add_key(private_key_string)
# do ssh stuff
# as agent loses scope, the ssh agent is killed
"""
from __future__ import with_statement
import atexit
import tempfile
import os
import sys
import shutil
import subprocess
import random
import time
import datetime
class SshAgentException(Exception):
"""An exception thrown for problems in SshAgent
"""
def __init__(self, message):
# Call the base class constructor with the parameters it needs
super(SshAgentException, self).__init__(message)
class SshAgent(object):
"""Run an ssh agent and set SSH_AUTH_SOCK so that clients will use it.
The running agent can have one or more keys added (via the SshAgent.add_key()
    method) or via any other method that can find and talk to the running agent.
"""
class Cleanup(object):
"""A helper functor class for SshAgent
An object of this class can be passed
directly to atexit, which will call __call__() when the
program exits
"""
def __init__(self, ssh_agent, ssh_auth_sock_dir):
self.ssh_agent = ssh_agent
self.ssh_auth_sock_dir = ssh_auth_sock_dir
self.cleaned_up = False
self.original_env_var = os.environ.get('SSH_AUTH_SOCK')
def __call__(self):
if self.cleaned_up:
return
self.cleaned_up = True
try:
shutil.rmtree(self.ssh_auth_sock_dir, ignore_errors=True)
except OSError:
pass
try:
self.ssh_agent.kill()
except OSError:
pass
if self.original_env_var:
os.environ['SSH_AUTH_SOCK'] = self.original_env_var
else:
del os.environ['SSH_AUTH_SOCK']
def pass_(self):
"""A function to appease pylint"""
pass
def pass__(self):
"""Another function to appease pylint"""
self.pass_()
def __init__(self):
devnull = open(os.devnull, 'w')
# Start an ssh-agent process and register it to be killed atexit
self.ssh_auth_sock_dir = tempfile.mkdtemp(prefix=os.path.basename(sys.argv[0]) + '.')
self.ssh_auth_sock = os.path.join(self.ssh_auth_sock_dir, "ssh_agent")
self.ssh_agent = subprocess.Popen(["ssh-agent", "-d", "-a", self.ssh_auth_sock], stdout=devnull, stderr=devnull)
self.cleanup = self.Cleanup(self.ssh_agent, self.ssh_auth_sock_dir)
# this is here so that when python exits, we make sure that the agent is killed
        # (in case python exits before our __del__() is called)
atexit.register(self.cleanup)
os.environ["SSH_AUTH_SOCK"] = self.ssh_auth_sock
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tback):
self.cleanup()
def __del__(self):
self.cleanup()
def kill(self):
'''Explicitly kill the running ssh-agent
It's not necessary to call this function as the agent
will be cleaned up automatically.
'''
self.cleanup()
def add_key(self, key):
"""Add a key to the running agent.
Note:
This function can be called any number of times to add multiple keys.
Args:
key (str): A string containing the ssh private key to be added (the
actual key data, not the filename of a key)
Raises:
SshAgentException: when ssh-add does not immediately return (as in the
case of a private key with a passphrase)
"""
#if self.ssh_agent.poll() is None:
# raise SshAgentException("Unable to add ssh key. Did agent die?")
named_pipe_path = os.path.join(self.ssh_auth_sock_dir, "keypipe." + str(random.getrandbits(64)))
try:
os.mkfifo(named_pipe_path, 0600)
except OSError, exception:
print "Failed to create FIFO: %s" % exception
devnull = open(os.devnull, 'w')
ssh_add = subprocess.Popen(["ssh-add", named_pipe_path], stdout=devnull, stderr=devnull)
fifo = open(named_pipe_path, 'w')
print >> fifo, key
fifo.close()
#Popen.wait() doesn't have a timeout, so we'll implement one using poll() :(
start_time = datetime.datetime.now()
while ssh_add.poll() is None:
if (datetime.datetime.now() - start_time).total_seconds() > 5:
try:
ssh_add.kill()
except OSError:
pass
raise SshAgentException("Unable to add ssh key. Timed out. Does key have a passphrase?")
time.sleep(0.1)
os.remove(named_pipe_path)
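# --- Illustrative sketch, not part of the upstream module: typical use of the
# SshAgent context manager defined above. `private_key_text` is a hypothetical
# placeholder for the contents of a passphrase-less private key.
def _example_ssh_agent_usage(private_key_text):
    """Minimal usage sketch for SshAgent (assumes a key without a passphrase)."""
    with SshAgent() as agent:
        agent.add_key(private_key_text)
        # SSH_AUTH_SOCK now points at the temporary agent, so any child
        # process started here (e.g. git over ssh) can use the key.
        subprocess.call(["ssh-add", "-l"])
    # On exit the agent is killed and SSH_AUTH_SOCK is restored.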
# pylint: disable=too-many-lines
# these are already imported inside of the ssh library
#import os
#import subprocess
class GitCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class GitCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
path,
verbose=False,
ssh_key=None,
author=None):
''' Constructor for GitCLI '''
self.path = path
self.verbose = verbose
self.ssh_key = ssh_key
self.author = author
self.environment_vars = os.environ.copy()
if self.author:
author_dict = {}
author_list = author.split('<')
author_dict['GIT_COMMITTER_NAME'] = author_list[0].strip()
author_dict['GIT_COMMITTER_EMAIL'] = author_list[0].strip()
self.environment_vars.update(author_dict)
def _add(self, files_to_add=None):
''' git add '''
cmd = ["add", "--no-ignore-removal"]
if files_to_add:
cmd.extend(files_to_add)
else:
cmd.append('.')
results = self.git_cmd(cmd)
return results
def _commit(self, msg, author=None):
''' git commit with message '''
cmd = ["commit", "-m", msg]
if author:
cmd += ["--author", author]
results = self.git_cmd(cmd)
return results
def _clone(self, repo, dest, bare=False):
''' git clone '''
cmd = ["clone"]
if bare:
cmd += ["--bare"]
cmd += [repo, dest]
results = self.git_cmd(cmd)
return results
def _status(self, porcelain=False, show_untracked=True):
''' Do a git status '''
cmd = ["status"]
if porcelain:
cmd.append('--porcelain')
if show_untracked:
cmd.append('--untracked-files=normal')
else:
cmd.append('--untracked-files=no')
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _checkout(self, branch):
''' Do a git checkout to <branch> '''
cmd = ["checkout", branch]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _get_current_branch(self):
        ''' Get the current branch '''
cmd = ["describe", "--contains", "--all", "HEAD"]
results = self.git_cmd(cmd, output=True, output_type='raw')
results['results'] = results['results'].rstrip()
return results
def _merge(self, merge_id):
        ''' Do a git merge of <merge_id> into the current branch '''
cmd = ["merge", merge_id]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _push(self, remote, src_branch, dest_branch):
        ''' Do a git push of <src_branch> to <dest_branch> on <remote> '''
push_branches = src_branch + ":" + dest_branch
cmd = ["push", remote, push_branches]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _remote_update(self):
''' Do a git remote update '''
cmd = ["remote", "update"]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _diff(self, diff_branch):
''' Do a git diff diff_branch'''
cmd = ["diff", diff_branch]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _rebase(self, rebase_branch):
''' Do a git rebase rebase_branch'''
cmd = ["rebase", rebase_branch]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _config(self, get_args):
''' Do a git config --get <get_args> '''
cmd = ["config", '--get', get_args]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def git_cmd(self, cmd, output=False, output_type='json'):
'''Base command for git '''
cmds = ['/usr/bin/git']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
if self.ssh_key:
with SshAgent() as agent:
self.environment_vars['SSH_AUTH_SOCK'] = os.environ['SSH_AUTH_SOCK']
agent.add_key(self.ssh_key)
proc = subprocess.Popen(cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.environment_vars)
stdout, stderr = proc.communicate()
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
else:
proc = subprocess.Popen(cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.environment_vars)
stdout, stderr = proc.communicate()
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds
})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {},
})
return rval
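# --- Illustrative sketch, not part of the upstream module: driving GitCLI
# directly. The repository path below is a made-up placeholder.
def _example_git_cli_status(repo_path='/tmp/some_repo'):
    """Minimal sketch: run `git status --porcelain` through GitCLI."""
    git = GitCLI(repo_path, verbose=True)
    os.chdir(git.path)  # GitCLI itself does not chdir; GitMerge below does
    result = git._status(porcelain=True)
    # the returned dict always carries 'returncode', 'results' and 'cmd'
    return result['returncode'] == 0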
class GitMerge(GitCLI):
''' Class to wrap the git merge line tools
'''
# pylint: disable=too-many-arguments
def __init__(self,
path,
merge_id,
branch,
author=None):
''' Constructor for GitCommit '''
super(GitMerge, self).__init__(path, author=author)
self.path = path
self.merge_id = merge_id
self.branch = branch
self.author = author
self.debug = []
os.chdir(path)
def checkout_branch(self):
''' check out the desired branch '''
current_branch = self._get_current_branch()
if current_branch['results'] == self.branch:
return True
results = self._checkout(self.branch)
self.debug.append(results)
if results['returncode'] == 0:
return True
return False
def merge(self):
'''perform a git merge '''
if self.checkout_branch():
merge_results = self._merge(self.merge_id)
merge_results['debug'] = self.debug
if 'Already up-to-date' in merge_results['results']:
merge_results['no_merge'] = True
return merge_results
return {'returncode': 1,
'results': {},
'end': 'yes',
'debug': self.debug
}
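# --- Illustrative sketch, not part of the upstream module: using GitMerge
# outside of Ansible. Path, merge id and branch are placeholders.
def _example_git_merge():
    """Minimal sketch: merge a ref into master and report success."""
    merger = GitMerge('/tmp/some_repo', merge_id='origin/feature', branch='master')
    results = merger.merge()
    return results['returncode'] == 0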
def main():
'''
ansible git module for merging
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str', choices=['present']),
path=dict(default=None, required=True, type='str'),
branch=dict(default=None, required=True, type='str'),
merge_id=dict(default=None, required=True, type='str'),
author=dict(default=None, required=False, type='str'),
),
supports_check_mode=False,
)
git = GitMerge(module.params['path'],
module.params['merge_id'],
module.params['branch'],
module.params['author']
)
state = module.params['state']
if state == 'present':
results = git.merge()
if results['returncode'] != 0:
module.fail_json(msg=results)
if results.has_key('no_merge'):
module.exit_json(changed=False, results=results, state="present")
module.exit_json(changed=True, results=results, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
if __name__ == '__main__':
from ansible.module_utils.basic import *
main()
| apache-2.0 | -3,352,005,166,285,744,000 | 30.209957 | 120 | 0.516679 | false |
ge0rgi/cinder | cinder/api/contrib/volume_transfer.py | 1 | 6224 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from six.moves import http_client
import webob
from webob import exc
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import transfers as transfer_view
from cinder import exception
from cinder.i18n import _, _LI
from cinder import transfer as transferAPI
LOG = logging.getLogger(__name__)
class VolumeTransferController(wsgi.Controller):
"""The Volume Transfer API controller for the OpenStack API."""
_view_builder_class = transfer_view.ViewBuilder
def __init__(self):
self.transfer_api = transferAPI.API()
super(VolumeTransferController, self).__init__()
def show(self, req, id):
"""Return data about active transfers."""
context = req.environ['cinder.context']
# Not found exception will be handled at the wsgi level
transfer = self.transfer_api.get(context, transfer_id=id)
return self._view_builder.detail(req, transfer)
def index(self, req):
"""Returns a summary list of transfers."""
return self._get_transfers(req, is_detail=False)
def detail(self, req):
"""Returns a detailed list of transfers."""
return self._get_transfers(req, is_detail=True)
def _get_transfers(self, req, is_detail):
"""Returns a list of transfers, transformed through view builder."""
context = req.environ['cinder.context']
filters = req.params.copy()
LOG.debug('Listing volume transfers')
transfers = self.transfer_api.get_all(context, filters=filters)
transfer_count = len(transfers)
limited_list = common.limited(transfers, req)
if is_detail:
transfers = self._view_builder.detail_list(req, limited_list,
transfer_count)
else:
transfers = self._view_builder.summary_list(req, limited_list,
transfer_count)
return transfers
@wsgi.response(http_client.ACCEPTED)
def create(self, req, body):
"""Create a new volume transfer."""
LOG.debug('Creating new volume transfer %s', body)
self.assert_valid_body(body, 'transfer')
context = req.environ['cinder.context']
transfer = body['transfer']
try:
volume_id = transfer['volume_id']
except KeyError:
msg = _("Incorrect request body format")
raise exc.HTTPBadRequest(explanation=msg)
name = transfer.get('name', None)
if name is not None:
self.validate_string_length(name, 'Transfer name',
min_length=1, max_length=255,
remove_whitespaces=True)
name = name.strip()
LOG.info(_LI("Creating transfer of volume %s"),
volume_id)
try:
new_transfer = self.transfer_api.create(context, volume_id, name)
# Not found exception will be handled at the wsgi level
except exception.InvalidVolume as error:
raise exc.HTTPBadRequest(explanation=error.msg)
transfer = self._view_builder.create(req,
dict(new_transfer))
return transfer
@wsgi.response(http_client.ACCEPTED)
def accept(self, req, id, body):
"""Accept a new volume transfer."""
transfer_id = id
LOG.debug('Accepting volume transfer %s', transfer_id)
self.assert_valid_body(body, 'accept')
context = req.environ['cinder.context']
accept = body['accept']
try:
auth_key = accept['auth_key']
except KeyError:
msg = _("Incorrect request body format")
raise exc.HTTPBadRequest(explanation=msg)
LOG.info(_LI("Accepting transfer %s"), transfer_id)
try:
accepted_transfer = self.transfer_api.accept(context, transfer_id,
auth_key)
except exception.VolumeSizeExceedsAvailableQuota as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.msg, headers={'Retry-After': '0'})
except exception.InvalidVolume as error:
raise exc.HTTPBadRequest(explanation=error.msg)
transfer = \
self._view_builder.summary(req,
dict(accepted_transfer))
return transfer
def delete(self, req, id):
"""Delete a transfer."""
context = req.environ['cinder.context']
LOG.info(_LI("Delete transfer with id: %s"), id)
# Not found exception will be handled at the wsgi level
self.transfer_api.delete(context, transfer_id=id)
return webob.Response(status_int=http_client.ACCEPTED)
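# --- Illustrative sketch, not part of the upstream file: the JSON request
# bodies the controller above expects. All values are placeholders.
def _example_transfer_bodies():
    """Build example create/accept bodies for VolumeTransferController."""
    create_body = {'transfer': {'volume_id': 'an-existing-volume-uuid',
                                'name': 'demo-transfer'}}
    accept_body = {'accept': {'auth_key': 'auth-key-returned-by-create'}}
    return create_body, accept_body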
class Volume_transfer(extensions.ExtensionDescriptor):
"""Volume transfer management support."""
name = "VolumeTransfer"
alias = "os-volume-transfer"
updated = "2013-05-29T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension(Volume_transfer.alias,
VolumeTransferController(),
collection_actions={'detail':
'GET'},
member_actions={'accept': 'POST'})
resources.append(res)
return resources
| apache-2.0 | 8,315,506,419,070,124,000 | 35.611765 | 78 | 0.599293 | false |
Matir/LoginScan | core/__init__.py | 1 | 1413 |
# Copyright (C) 2011 by David Tomaschik <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from core import config
def print_verbose(msg):
if config.config.get('verbose',False):
print_error(msg)
def print_error(msg):
sys.stderr.write(str(msg))
sys.stderr.write('\n')
sys.stderr.flush()
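# Illustrative sketch, not part of the upstream package: intended use of the
# helpers above. Assumes core.config.config behaves like a plain dict.
def _example_usage():
    config.config['verbose'] = True
    print_verbose("shown only when the verbose flag is set")
    print_error("always written to stderr")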
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| mit | -1,753,436,630,876,571,100 | 40.558824 | 79 | 0.760793 | false |
patjouk/djangogirls | applications/migrations/0015_auto_20151203_1109.py | 1 | 1285 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('applications', '0014_auto_20150814_0439'),
]
operations = [
migrations.AlterField(
model_name='application',
name='state',
field=models.CharField(max_length=50, verbose_name='State of the application', null=True, default='submitted', choices=[('submitted', 'Application submitted'), ('accepted', 'Application accepted'), ('rejected', 'Application rejected'), ('waitlisted', 'Application on waiting list'), ('declined', 'Applicant declined')]),
),
migrations.AlterField(
model_name='email',
name='recipients_group',
field=models.CharField(max_length=50, verbose_name='Recipients', help_text='Only people assigned to chosen group will receive this email.', choices=[('submitted', 'Application submitted'), ('accepted', 'Application accepted'), ('rejected', 'Application rejected'), ('waitlisted', 'Application on waiting list'), ('declined', 'Applicant declined'), ('waiting', 'RSVP: Waiting for response'), ('yes', 'RSVP: Confirmed attendance'), ('no', 'RSVP: Rejected invitation')]),
),
]
| bsd-3-clause | -8,440,414,282,770,642,000 | 52.541667 | 480 | 0.652918 | false |
aringh/odl | examples/space/simple_rn.py | 1 | 4104 |
"""An example of a very simple space, the space rn.
Including some benchmarks with an optimized version.
"""
import numpy as np
import odl
from odl.space.base_tensors import TensorSpace, Tensor
from odl.util.testutils import Timer
class SimpleRn(TensorSpace):
"""The real space R^n, non-optimized implmentation."""
def __init__(self, size):
super(SimpleRn, self).__init__(size, dtype=float)
def zero(self):
return self.element(np.zeros(self.size))
def one(self):
return self.element(np.ones(self.size))
def _lincomb(self, a, x1, b, x2, out):
out.data[:] = a * x1.data + b * x2.data
def _inner(self, x1, x2):
return float(np.vdot(x1.data, x2.data))
def _multiply(self, x1, x2, out):
out.data[:] = x1.data * x2.data
def _divide(self, x1, x2, out):
out.data[:] = x1.data / x2.data
    def element(self, *args, **kwargs):
        if not args and not kwargs:
            return self.element(np.empty(self.size))
        if isinstance(args[0], np.ndarray):
            if args[0].shape == (self.size,):
                return RnVector(self, args[0])
            else:
                raise ValueError('input array {} is of shape {}, expected '
                                 'shape ({},).'.format(args[0], args[0].shape,
                                                       self.size))
        else:
            return self.element(np.array(
                *args, **kwargs).astype(np.float64, copy=False))
class RnVector(Tensor):
def __init__(self, space, data):
super(RnVector, self).__init__(space)
self.data = data
def __getitem__(self, index):
return self.data.__getitem__(index)
def __setitem__(self, index, value):
return self.data.__setitem__(index, value)
def asarray(self, *args):
        return self.data
r5 = SimpleRn(5)
# odl.diagnostics.SpaceTest(r5).run_tests()
# Do some tests to compare
n = 10 ** 7
iterations = 10
# Perform some benchmarks with rn
opt_spc = odl.rn(n)
simple_spc = SimpleRn(n)
x, y, z = np.random.rand(n), np.random.rand(n), np.random.rand(n)
ox, oy, oz = (opt_spc.element(x.copy()), opt_spc.element(y.copy()),
opt_spc.element(z.copy()))
sx, sy, sz = (simple_spc.element(x.copy()), simple_spc.element(y.copy()),
simple_spc.element(z.copy()))
if 'cuda' in odl.space.entry_points.tensor_space_impl_names():
cu_spc = odl.rn(n, impl='cuda')
cx, cy, cz = (cu_spc.element(x.copy()), cu_spc.element(y.copy()),
cu_spc.element(z.copy()))
print(" lincomb:")
with Timer("SimpleRn"):
for _ in range(iterations):
simple_spc.lincomb(2.13, sx, 3.14, sy, out=sz)
print("result: {}".format(sz[1:5]))
with Timer("odl numpy"):
for _ in range(iterations):
opt_spc.lincomb(2.13, ox, 3.14, oy, out=oz)
print("result: {}".format(oz[1:5]))
if 'cuda' in odl.space.entry_points.tensor_space_impl_names():
with Timer("odl cuda"):
for _ in range(iterations):
cu_spc.lincomb(2.13, cx, 3.14, cy, out=cz)
print("result: {}".format(cz[1:5]))
print("\n Norm:")
with Timer("SimpleRn"):
for _ in range(iterations):
result = sz.norm()
print("result: {}".format(result))
with Timer("odl numpy"):
for _ in range(iterations):
result = oz.norm()
print("result: {}".format(result))
if 'cuda' in odl.space.entry_points.tensor_space_impl_names():
with Timer("odl cuda"):
for _ in range(iterations):
result = cz.norm()
print("result: {}".format(result))
print("\n Inner:")
with Timer("SimpleRn"):
for _ in range(iterations):
result = sz.inner(sx)
print("result: {}".format(result))
with Timer("odl numpy"):
for _ in range(iterations):
result = oz.inner(ox)
print("result: {}".format(result))
if 'cuda' in odl.space.entry_points.tensor_space_impl_names():
with Timer("odl cuda"):
for _ in range(iterations):
result = cz.inner(cx)
print("result: {}".format(result))
| mpl-2.0 | 2,834,706,837,063,019,500 | 28.52518 | 78 | 0.584064 | false |
nicozhang/pyspider | pyspider/fetcher/tornado_fetcher.py | 1 | 27165 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2012-12-17 11:07:19
from __future__ import unicode_literals
import os
import sys
import six
import copy
import time
import json
import logging
import traceback
import functools
import threading
import tornado.ioloop
import tornado.httputil
import tornado.httpclient
import pyspider
from six.moves import queue, http_cookies
from six.moves.urllib.robotparser import RobotFileParser
from requests import cookies
from six.moves.urllib.parse import urljoin, urlsplit
from tornado import gen
from tornado.curl_httpclient import CurlAsyncHTTPClient
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from pyspider.libs import utils, dataurl, counter
from pyspider.libs.url import quote_chinese
from .cookie_utils import extract_cookies_to_jar
logger = logging.getLogger('fetcher')
class MyCurlAsyncHTTPClient(CurlAsyncHTTPClient):
def free_size(self):
return len(self._free_list)
def size(self):
return len(self._curls) - self.free_size()
class MySimpleAsyncHTTPClient(SimpleAsyncHTTPClient):
def free_size(self):
return self.max_clients - self.size()
def size(self):
return len(self.active)
fetcher_output = {
"status_code": int,
"orig_url": str,
"url": str,
"headers": dict,
"content": str,
"cookies": dict,
}
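# Illustrative sketch, not part of the upstream module: the shape of a task
# dict as consumed by Fetcher.fetch(). All field values are placeholders.
def _example_task():
    return {
        'project': 'demo_project',
        'taskid': 'demo_taskid',
        'url': 'http://example.com/',
        'fetch': {
            'method': 'GET',
            'headers': {'User-Agent': 'demo-agent'},
            'timeout': 60,
            'save': {'page': 1},
        },
    }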
class Fetcher(object):
user_agent = "pyspider/%s (+http://pyspider.org/)" % pyspider.__version__
default_options = {
'method': 'GET',
'headers': {
},
'use_gzip': True,
'timeout': 120,
'connect_timeout': 20,
}
phantomjs_proxy = None
splash_endpoint = None
splash_lua_source = open(os.path.join(os.path.dirname(__file__), "splash_fetcher.lua")).read()
robot_txt_age = 60*60 # 1h
def __init__(self, inqueue, outqueue, poolsize=100, proxy=None, async=True):
self.inqueue = inqueue
self.outqueue = outqueue
self.poolsize = poolsize
self._running = False
self._quit = False
self.proxy = proxy
self.async = async
self.ioloop = tornado.ioloop.IOLoop()
self.robots_txt_cache = {}
# binding io_loop to http_client here
if self.async:
self.http_client = MyCurlAsyncHTTPClient(max_clients=self.poolsize,
io_loop=self.ioloop)
else:
self.http_client = tornado.httpclient.HTTPClient(MyCurlAsyncHTTPClient, max_clients=self.poolsize)
self._cnt = {
'5m': counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(30, 10)),
'1h': counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(60, 60)),
}
def send_result(self, type, task, result):
'''Send fetch result to processor'''
if self.outqueue:
try:
self.outqueue.put((task, result))
except Exception as e:
logger.exception(e)
def fetch(self, task, callback=None):
if self.async:
return self.async_fetch(task, callback)
else:
return self.async_fetch(task, callback).result()
@gen.coroutine
def async_fetch(self, task, callback=None):
'''Do one fetch'''
url = task.get('url', 'data:,')
if callback is None:
callback = self.send_result
type = 'None'
start_time = time.time()
try:
if url.startswith('data:'):
type = 'data'
result = yield gen.maybe_future(self.data_fetch(url, task))
elif task.get('fetch', {}).get('fetch_type') in ('js', 'phantomjs'):
type = 'phantomjs'
result = yield self.phantomjs_fetch(url, task)
elif task.get('fetch', {}).get('fetch_type') in ('splash', ):
type = 'splash'
result = yield self.splash_fetch(url, task)
else:
type = 'http'
result = yield self.http_fetch(url, task)
except Exception as e:
logger.exception(e)
result = self.handle_error(type, url, task, start_time, e)
callback(type, task, result)
self.on_result(type, task, result)
raise gen.Return(result)
def sync_fetch(self, task):
        '''Synchronous fetch, usually used in the xmlrpc thread'''
if not self._running:
return self.ioloop.run_sync(functools.partial(self.async_fetch, task, lambda t, _, r: True))
wait_result = threading.Condition()
_result = {}
def callback(type, task, result):
wait_result.acquire()
_result['type'] = type
_result['task'] = task
_result['result'] = result
wait_result.notify()
wait_result.release()
wait_result.acquire()
self.ioloop.add_callback(self.fetch, task, callback)
while 'result' not in _result:
wait_result.wait()
wait_result.release()
return _result['result']
def data_fetch(self, url, task):
'''A fake fetcher for dataurl'''
self.on_fetch('data', task)
result = {}
result['orig_url'] = url
result['content'] = dataurl.decode(url)
result['headers'] = {}
result['status_code'] = 200
result['url'] = url
result['cookies'] = {}
result['time'] = 0
result['save'] = task.get('fetch', {}).get('save')
if len(result['content']) < 70:
logger.info("[200] %s:%s %s 0s", task.get('project'), task.get('taskid'), url)
else:
logger.info(
"[200] %s:%s data:,%s...[content:%d] 0s",
task.get('project'), task.get('taskid'),
result['content'][:70],
len(result['content'])
)
return result
def handle_error(self, type, url, task, start_time, error):
result = {
'status_code': getattr(error, 'code', 599),
'error': utils.text(error),
'traceback': traceback.format_exc() if sys.exc_info()[0] else None,
'content': "",
'time': time.time() - start_time,
'orig_url': url,
'url': url,
"save": task.get('fetch', {}).get('save')
}
logger.error("[%d] %s:%s %s, %r %.2fs",
result['status_code'], task.get('project'), task.get('taskid'),
url, error, result['time'])
return result
allowed_options = ['method', 'data', 'timeout', 'cookies', 'use_gzip', 'validate_cert']
def pack_tornado_request_parameters(self, url, task):
fetch = copy.deepcopy(self.default_options)
fetch['url'] = url
fetch['headers'] = tornado.httputil.HTTPHeaders(fetch['headers'])
fetch['headers']['User-Agent'] = self.user_agent
task_fetch = task.get('fetch', {})
for each in self.allowed_options:
if each in task_fetch:
fetch[each] = task_fetch[each]
fetch['headers'].update(task_fetch.get('headers', {}))
if task.get('track'):
track_headers = tornado.httputil.HTTPHeaders(
task.get('track', {}).get('fetch', {}).get('headers') or {})
track_ok = task.get('track', {}).get('process', {}).get('ok', False)
else:
track_headers = {}
track_ok = False
# proxy
proxy_string = None
if isinstance(task_fetch.get('proxy'), six.string_types):
proxy_string = task_fetch['proxy']
elif self.proxy and task_fetch.get('proxy', True):
proxy_string = self.proxy
if proxy_string:
if '://' not in proxy_string:
proxy_string = 'http://' + proxy_string
proxy_splited = urlsplit(proxy_string)
fetch['proxy_host'] = proxy_splited.hostname
if proxy_splited.username:
fetch['proxy_username'] = proxy_splited.username
if proxy_splited.password:
fetch['proxy_password'] = proxy_splited.password
if six.PY2:
for key in ('proxy_host', 'proxy_username', 'proxy_password'):
if key in fetch:
fetch[key] = fetch[key].encode('utf8')
fetch['proxy_port'] = proxy_splited.port or 8080
# etag
if task_fetch.get('etag', True):
_t = None
if isinstance(task_fetch.get('etag'), six.string_types):
_t = task_fetch.get('etag')
elif track_ok:
_t = track_headers.get('etag')
if _t and 'If-None-Match' not in fetch['headers']:
fetch['headers']['If-None-Match'] = _t
        # last modified
if task_fetch.get('last_modified', task_fetch.get('last_modifed', True)):
last_modified = task_fetch.get('last_modified', task_fetch.get('last_modifed', True))
_t = None
if isinstance(last_modified, six.string_types):
_t = last_modified
elif track_ok:
_t = track_headers.get('last-modified')
if _t and 'If-Modified-Since' not in fetch['headers']:
fetch['headers']['If-Modified-Since'] = _t
# timeout
if 'timeout' in fetch:
fetch['request_timeout'] = fetch['timeout']
del fetch['timeout']
# data rename to body
if 'data' in fetch:
fetch['body'] = fetch['data']
del fetch['data']
return fetch
@gen.coroutine
def can_fetch(self, user_agent, url):
parsed = urlsplit(url)
domain = parsed.netloc
if domain in self.robots_txt_cache:
robot_txt = self.robots_txt_cache[domain]
if time.time() - robot_txt.mtime() > self.robot_txt_age:
robot_txt = None
else:
robot_txt = None
if robot_txt is None:
robot_txt = RobotFileParser()
try:
response = yield gen.maybe_future(self.http_client.fetch(
urljoin(url, '/robots.txt'), connect_timeout=10, request_timeout=30))
content = response.body
except tornado.httpclient.HTTPError as e:
logger.error('load robots.txt from %s error: %r', domain, e)
content = ''
try:
content = content.decode('utf8', 'ignore')
except UnicodeDecodeError:
content = ''
robot_txt.parse(content.splitlines())
self.robots_txt_cache[domain] = robot_txt
raise gen.Return(robot_txt.can_fetch(user_agent, url))
def clear_robot_txt_cache(self):
now = time.time()
for domain, robot_txt in self.robots_txt_cache.items():
if now - robot_txt.mtime() > self.robot_txt_age:
del self.robots_txt_cache[domain]
@gen.coroutine
def http_fetch(self, url, task):
'''HTTP fetcher'''
start_time = time.time()
self.on_fetch('http', task)
handle_error = lambda x: self.handle_error('http', url, task, start_time, x)
# setup request parameters
fetch = self.pack_tornado_request_parameters(url, task)
task_fetch = task.get('fetch', {})
session = cookies.RequestsCookieJar()
# fix for tornado request obj
if 'Cookie' in fetch['headers']:
c = http_cookies.SimpleCookie()
try:
c.load(fetch['headers']['Cookie'])
except AttributeError:
c.load(utils.utf8(fetch['headers']['Cookie']))
for key in c:
session.set(key, c[key])
del fetch['headers']['Cookie']
if 'cookies' in fetch:
session.update(fetch['cookies'])
del fetch['cookies']
max_redirects = task_fetch.get('max_redirects', 5)
# we will handle redirects by hand to capture cookies
fetch['follow_redirects'] = False
# making requests
while True:
# robots.txt
if task_fetch.get('robots_txt', False):
can_fetch = yield self.can_fetch(fetch['headers']['User-Agent'], fetch['url'])
if not can_fetch:
error = tornado.httpclient.HTTPError(403, 'Disallowed by robots.txt')
raise gen.Return(handle_error(error))
try:
request = tornado.httpclient.HTTPRequest(**fetch)
# if cookie already in header, get_cookie_header wouldn't work
old_cookie_header = request.headers.get('Cookie')
if old_cookie_header:
del request.headers['Cookie']
cookie_header = cookies.get_cookie_header(session, request)
if cookie_header:
request.headers['Cookie'] = cookie_header
elif old_cookie_header:
request.headers['Cookie'] = old_cookie_header
except Exception as e:
logger.exception(fetch)
raise gen.Return(handle_error(e))
try:
response = yield gen.maybe_future(self.http_client.fetch(request))
except tornado.httpclient.HTTPError as e:
if e.response:
response = e.response
else:
raise gen.Return(handle_error(e))
extract_cookies_to_jar(session, response.request, response.headers)
if (response.code in (301, 302, 303, 307)
and response.headers.get('Location')
and task_fetch.get('allow_redirects', True)):
if max_redirects <= 0:
error = tornado.httpclient.HTTPError(
599, 'Maximum (%d) redirects followed' % task_fetch.get('max_redirects', 5),
response)
raise gen.Return(handle_error(error))
if response.code in (302, 303):
fetch['method'] = 'GET'
if 'body' in fetch:
del fetch['body']
fetch['url'] = quote_chinese(urljoin(fetch['url'], response.headers['Location']))
fetch['request_timeout'] -= time.time() - start_time
if fetch['request_timeout'] < 0:
fetch['request_timeout'] = 0.1
max_redirects -= 1
continue
result = {}
result['orig_url'] = url
result['content'] = response.body or ''
result['headers'] = dict(response.headers)
result['status_code'] = response.code
result['url'] = response.effective_url or url
result['time'] = time.time() - start_time
result['cookies'] = session.get_dict()
result['save'] = task_fetch.get('save')
if response.error:
result['error'] = utils.text(response.error)
if 200 <= response.code < 300:
logger.info("[%d] %s:%s %s %.2fs", response.code,
task.get('project'), task.get('taskid'),
url, result['time'])
else:
logger.warning("[%d] %s:%s %s %.2fs", response.code,
task.get('project'), task.get('taskid'),
url, result['time'])
raise gen.Return(result)
@gen.coroutine
def phantomjs_fetch(self, url, task):
'''Fetch with phantomjs proxy'''
start_time = time.time()
self.on_fetch('phantomjs', task)
handle_error = lambda x: self.handle_error('phantomjs', url, task, start_time, x)
# check phantomjs proxy is enabled
if not self.phantomjs_proxy:
result = {
"orig_url": url,
"content": "phantomjs is not enabled.",
"headers": {},
"status_code": 501,
"url": url,
"time": time.time() - start_time,
"cookies": {},
"save": task.get('fetch', {}).get('save')
}
logger.warning("[501] %s:%s %s 0s", task.get('project'), task.get('taskid'), url)
raise gen.Return(result)
# setup request parameters
fetch = self.pack_tornado_request_parameters(url, task)
task_fetch = task.get('fetch', {})
for each in task_fetch:
if each not in fetch:
fetch[each] = task_fetch[each]
# robots.txt
if task_fetch.get('robots_txt', False):
user_agent = fetch['headers']['User-Agent']
can_fetch = yield self.can_fetch(user_agent, url)
if not can_fetch:
error = tornado.httpclient.HTTPError(403, 'Disallowed by robots.txt')
raise gen.Return(handle_error(error))
request_conf = {
'follow_redirects': False
}
request_conf['connect_timeout'] = fetch.get('connect_timeout', 20)
request_conf['request_timeout'] = fetch.get('request_timeout', 120) + 1
session = cookies.RequestsCookieJar()
request = tornado.httpclient.HTTPRequest(url=fetch['url'])
if fetch.get('cookies'):
session.update(fetch['cookies'])
if 'Cookie' in request.headers:
del request.headers['Cookie']
fetch['headers']['Cookie'] = cookies.get_cookie_header(session, request)
# making requests
fetch['headers'] = dict(fetch['headers'])
try:
request = tornado.httpclient.HTTPRequest(
url=self.phantomjs_proxy, method="POST",
body=json.dumps(fetch), **request_conf)
except Exception as e:
raise gen.Return(handle_error(e))
try:
response = yield gen.maybe_future(self.http_client.fetch(request))
except tornado.httpclient.HTTPError as e:
if e.response:
response = e.response
else:
raise gen.Return(handle_error(e))
if not response.body:
raise gen.Return(handle_error(Exception('no response from phantomjs: %r' % response)))
result = {}
try:
result = json.loads(utils.text(response.body))
assert 'status_code' in result, result
except Exception as e:
if response.error:
result['error'] = utils.text(response.error)
raise gen.Return(handle_error(e))
if result.get('status_code', 200):
logger.info("[%d] %s:%s %s %.2fs", result['status_code'],
task.get('project'), task.get('taskid'), url, result['time'])
else:
logger.error("[%d] %s:%s %s, %r %.2fs", result['status_code'],
task.get('project'), task.get('taskid'),
url, result['content'], result['time'])
raise gen.Return(result)
@gen.coroutine
def splash_fetch(self, url, task):
'''Fetch with splash'''
start_time = time.time()
self.on_fetch('splash', task)
handle_error = lambda x: self.handle_error('splash', url, task, start_time, x)
        # check splash endpoint is enabled
if not self.splash_endpoint:
result = {
"orig_url": url,
"content": "splash is not enabled.",
"headers": {},
"status_code": 501,
"url": url,
"time": time.time() - start_time,
"cookies": {},
"save": task.get('fetch', {}).get('save')
}
logger.warning("[501] %s:%s %s 0s", task.get('project'), task.get('taskid'), url)
raise gen.Return(result)
# setup request parameters
fetch = self.pack_tornado_request_parameters(url, task)
task_fetch = task.get('fetch', {})
for each in task_fetch:
if each not in fetch:
fetch[each] = task_fetch[each]
# robots.txt
if task_fetch.get('robots_txt', False):
user_agent = fetch['headers']['User-Agent']
can_fetch = yield self.can_fetch(user_agent, url)
if not can_fetch:
error = tornado.httpclient.HTTPError(403, 'Disallowed by robots.txt')
raise gen.Return(handle_error(error))
request_conf = {
'follow_redirects': False,
'headers': {
'Content-Type': 'application/json',
}
}
request_conf['connect_timeout'] = fetch.get('connect_timeout', 20)
request_conf['request_timeout'] = fetch.get('request_timeout', 120) + 1
session = cookies.RequestsCookieJar()
request = tornado.httpclient.HTTPRequest(url=fetch['url'])
if fetch.get('cookies'):
session.update(fetch['cookies'])
if 'Cookie' in request.headers:
del request.headers['Cookie']
fetch['headers']['Cookie'] = cookies.get_cookie_header(session, request)
# making requests
fetch['lua_source'] = self.splash_lua_source
fetch['headers'] = dict(fetch['headers'])
try:
request = tornado.httpclient.HTTPRequest(
url=self.splash_endpoint, method="POST",
body=json.dumps(fetch), **request_conf)
except Exception as e:
raise gen.Return(handle_error(e))
try:
response = yield gen.maybe_future(self.http_client.fetch(request))
except tornado.httpclient.HTTPError as e:
if e.response:
response = e.response
else:
raise gen.Return(handle_error(e))
if not response.body:
            raise gen.Return(handle_error(Exception('no response from splash')))
result = {}
try:
result = json.loads(utils.text(response.body))
assert 'status_code' in result, result
except ValueError as e:
logger.error("result is not json: %r", response.body[:500])
raise gen.Return(handle_error(e))
except Exception as e:
if response.error:
result['error'] = utils.text(response.error)
raise gen.Return(handle_error(e))
if result.get('status_code', 200):
logger.info("[%d] %s:%s %s %.2fs", result['status_code'],
task.get('project'), task.get('taskid'), url, result['time'])
else:
logger.error("[%d] %s:%s %s, %r %.2fs", result['status_code'],
task.get('project'), task.get('taskid'),
url, result['content'], result['time'])
raise gen.Return(result)
def run(self):
'''Run loop'''
logger.info("fetcher starting...")
def queue_loop():
if not self.outqueue or not self.inqueue:
return
while not self._quit:
try:
if self.outqueue.full():
break
if self.http_client.free_size() <= 0:
break
task = self.inqueue.get_nowait()
                    # FIXME: decode_unicode_obj should be used after data is selected from
                    # the database; it's used here for performance
task = utils.decode_unicode_obj(task)
self.fetch(task)
except queue.Empty:
break
except KeyboardInterrupt:
break
except Exception as e:
logger.exception(e)
break
tornado.ioloop.PeriodicCallback(queue_loop, 100, io_loop=self.ioloop).start()
tornado.ioloop.PeriodicCallback(self.clear_robot_txt_cache, 10000, io_loop=self.ioloop).start()
self._running = True
try:
self.ioloop.start()
except KeyboardInterrupt:
pass
logger.info("fetcher exiting...")
def quit(self):
'''Quit fetcher'''
self._running = False
self._quit = True
self.ioloop.add_callback(self.ioloop.stop)
if hasattr(self, 'xmlrpc_server'):
self.xmlrpc_ioloop.add_callback(self.xmlrpc_server.stop)
self.xmlrpc_ioloop.add_callback(self.xmlrpc_ioloop.stop)
def size(self):
return self.http_client.size()
def xmlrpc_run(self, port=24444, bind='127.0.0.1', logRequests=False):
'''Run xmlrpc server'''
import umsgpack
from pyspider.libs.wsgi_xmlrpc import WSGIXMLRPCApplication
try:
from xmlrpc.client import Binary
except ImportError:
from xmlrpclib import Binary
application = WSGIXMLRPCApplication()
application.register_function(self.quit, '_quit')
application.register_function(self.size)
def sync_fetch(task):
result = self.sync_fetch(task)
result = Binary(umsgpack.packb(result))
return result
application.register_function(sync_fetch, 'fetch')
def dump_counter(_time, _type):
return self._cnt[_time].to_dict(_type)
application.register_function(dump_counter, 'counter')
import tornado.wsgi
import tornado.ioloop
import tornado.httpserver
container = tornado.wsgi.WSGIContainer(application)
self.xmlrpc_ioloop = tornado.ioloop.IOLoop()
self.xmlrpc_server = tornado.httpserver.HTTPServer(container, io_loop=self.xmlrpc_ioloop)
self.xmlrpc_server.listen(port=port, address=bind)
logger.info('fetcher.xmlrpc listening on %s:%s', bind, port)
self.xmlrpc_ioloop.start()
def on_fetch(self, type, task):
'''Called before task fetch'''
pass
def on_result(self, type, task, result):
'''Called after task fetched'''
status_code = result.get('status_code', 599)
if status_code != 599:
status_code = (int(status_code) / 100 * 100)
self._cnt['5m'].event((task.get('project'), status_code), +1)
self._cnt['1h'].event((task.get('project'), status_code), +1)
if type in ('http', 'phantomjs') and result.get('time'):
content_len = len(result.get('content', ''))
self._cnt['5m'].event((task.get('project'), 'speed'),
float(content_len) / result.get('time'))
self._cnt['1h'].event((task.get('project'), 'speed'),
float(content_len) / result.get('time'))
self._cnt['5m'].event((task.get('project'), 'time'), result.get('time'))
self._cnt['1h'].event((task.get('project'), 'time'), result.get('time'))
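# Illustrative sketch, not part of the upstream module: running one fetch
# synchronously, without scheduler/processor queues. Reuses the placeholder
# task built by _example_task() above.
def _example_sync_fetch():
    fetcher = Fetcher(inqueue=None, outqueue=None, async=False)
    result = fetcher.fetch(_example_task())
    # result carries 'status_code', 'content', 'headers', 'cookies', 'time', ...
    return result['status_code'], len(result['content'])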
| apache-2.0 | 1,857,981,678,926,246,400 | 36.887029 | 110 | 0.539555 | false |
mjwestcott/projecteuler | python/problem64.py | 1 | 1634 |
"""
problem64.py
https://projecteuler.net/problem=64
The first ten continued fraction representations of (irrational) square roots are:
sqrt(2)=[1;(2)] period=1
sqrt(3)=[1;(1,2)] period=2
sqrt(5)=[2;(4)] period=1
sqrt(6)=[2;(2,4)] period=2
sqrt(7)=[2;(1,1,1,4)] period=4
sqrt(8)=[2;(1,4)] period=2
sqrt(10)=[3;(6)] period=1
sqrt(11)=[3;(3,6)] period=2
sqrt(12)=[3;(2,6)] period=2
sqrt(13)=[3;(1,1,1,1,6)] period=5
Exactly four continued fractions, for N <= 13, have an odd period. How many
continued fractions for N <= 10000 have an odd period?
"""
from math import floor, sqrt
from toolset import quantify
def continued_fraction_sqrt(S):
# https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Continued_fraction_expansion
# Using variables S, m, d, a as in the URL above.
m = 0
d = 1
a = floor(sqrt(S))
seen = []
while True:
seen.append([m, d, a]) # The algorithm terminates when [m, d, a] repeats
m = (d * a) - m
d = (S - m**2) / d
if d == 0: # S is a perfect square
return [a]
a = floor((floor(sqrt(S)) + m) / d)
if [m, d, a] in seen:
return [x[2] for x in seen] # The third element is the variable 'a' we want.
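# Illustrative check, not part of the original solution: sqrt(7) should yield
# the expansion [2; (1, 1, 1, 4)] quoted in the module docstring above.
def _example_sqrt7():
    expansion = continued_fraction_sqrt(7)
    assert expansion == [2, 1, 1, 1, 4]
    return len(expansion) - 1  # period length, here 4 (an even period)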
def problem64():
continued_fractions = (continued_fraction_sqrt(i) for i in range(2, 10000+1))
odd_period = lambda x: len(x) % 2 == 0 # The first element is not part of the period.
return quantify(continued_fractions, pred=odd_period)
if __name__ == "__main__":
print(problem64())
| mit | -679,435,573,260,069,200 | 31.68 | 98 | 0.575275 | false |
kaji-project/shinken | shinken/objects/timeperiod.py | 1 | 32079 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# Calendar date
# -------------
# '(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2}) / (\d+) ([0-9:, -]+)'
# => len = 8 => CALENDAR_DATE
#
# '(\d{4})-(\d{2})-(\d{2}) / (\d+) ([0-9:, -]+)'
# => len = 5 => CALENDAR_DATE
#
# '(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2}) ([0-9:, -]+)'
# => len = 7 => CALENDAR_DATE
#
# '(\d{4})-(\d{2})-(\d{2}) ([0-9:, -]+)'
# => len = 4 => CALENDAR_DATE
#
# Month week day
# --------------
# '([a-z]*) (\d+) ([a-z]*) - ([a-z]*) (\d+) ([a-z]*) / (\d+) ([0-9:, -]+)'
# => len = 8 => MONTH WEEK DAY
# e.g.: wednesday 1 january - thursday 2 july / 3
#
# '([a-z]*) (\d+) - ([a-z]*) (\d+) / (\d+) ([0-9:, -]+)' => len = 6
# e.g.: february 1 - march 15 / 3 => MONTH DATE
# e.g.: monday 2 - thusday 3 / 2 => WEEK DAY
# e.g.: day 2 - day 6 / 3 => MONTH DAY
#
# '([a-z]*) (\d+) - (\d+) / (\d+) ([0-9:, -]+)' => len = 6
# e.g.: february 1 - 15 / 3 => MONTH DATE
# e.g.: thursday 2 - 4 => WEEK DAY
# e.g.: day 1 - 4 => MONTH DAY
#
# '([a-z]*) (\d+) ([a-z]*) - ([a-z]*) (\d+) ([a-z]*) ([0-9:, -]+)' => len = 7
# e.g.: wednesday 1 january - thursday 2 july => MONTH WEEK DAY
#
# '([a-z]*) (\d+) - (\d+) ([0-9:, -]+)' => len = 7
# e.g.: thursday 2 - 4 => WEEK DAY
# e.g.: february 1 - 15 / 3 => MONTH DATE
# e.g.: day 1 - 4 => MONTH DAY
#
# '([a-z]*) (\d+) - ([a-z]*) (\d+) ([0-9:, -]+)' => len = 5
# e.g.: february 1 - march 15 => MONTH DATE
# e.g.: monday 2 - thursday 3 => WEEK DAY
# e.g.: day 2 - day 6 => MONTH DAY
#
# '([a-z]*) (\d+) ([0-9:, -]+)' => len = 3
# e.g.: february 3 => MONTH DATE
# e.g.: thursday 2 => WEEK DAY
# e.g.: day 3 => MONTH DAY
#
# '([a-z]*) (\d+) ([a-z]*) ([0-9:, -]+)' => len = 4
# e.g.: thursday 3 february => MONTH WEEK DAY
#
# '([a-z]*) ([0-9:, -]+)' => len = 6
# e.g.: thursday => normal values
#
# Types: CALENDAR_DATE
# MONTH WEEK DAY
# WEEK DAY
# MONTH DATE
# MONTH DAY
#
import time
import re
from item import Item, Items
from shinken.daterange import Daterange, CalendarDaterange
from shinken.daterange import StandardDaterange, MonthWeekDayDaterange
from shinken.daterange import MonthDateDaterange, WeekDayDaterange
from shinken.daterange import MonthDayDaterange
from shinken.brok import Brok
from shinken.property import IntegerProp, StringProp, ListProp, BoolProp
from shinken.log import logger, naglog_result
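# Illustrative sketch, not part of the upstream module: building a Timeperiod
# from Nagios-style entries such as the ones documented in the header comment.
# The entry values below are placeholders.
def _example_workhours():
    tp = Timeperiod({'timeperiod_name': 'workhours',
                     'alias': 'Office hours',
                     'monday': '09:00-17:00',
                     'tuesday': '09:00-17:00'})
    # Non-standard keys land in tp.unresolved and still need to be resolved
    # into daterange objects before the timeperiod can be queried.
    for entry in tp.unresolved:
        tp.resolve_daterange(tp.dateranges, entry)
    return tp.is_time_valid(time.time())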
class Timeperiod(Item):
id = 1
my_type = 'timeperiod'
properties = Item.properties.copy()
properties.update({
'timeperiod_name': StringProp(fill_brok=['full_status']),
'alias': StringProp(default='', fill_brok=['full_status']),
'use': StringProp(default=None),
'register': IntegerProp(default=1),
# These are needed if a broker module calls methods on timeperiod objects
'dateranges': ListProp(fill_brok=['full_status'], default=[]),
'exclude': ListProp(fill_brok=['full_status'], default=[]),
'is_active': BoolProp(default=False)
})
running_properties = Item.running_properties.copy()
def __init__(self, params={}):
self.id = Timeperiod.id
Timeperiod.id = Timeperiod.id + 1
self.unresolved = []
self.dateranges = []
self.exclude = ''
self.customs = {}
self.plus = {}
self.invalid_entries = []
for key in params:
# timeperiod objects are too complicated to support multi valued
# attributes. we do as usual, last set value wins.
if isinstance(params[key], list):
if params[key]:
params[key] = params[key][-1]
else:
params[key] = ''
if key in ['name', 'alias', 'timeperiod_name', 'exclude', 'use', 'register', 'imported_from', 'is_active', 'dateranges']:
setattr(self, key, self.properties[key].pythonize(params[key]))
elif key.startswith('_'):
self.customs[key.upper()] = params[key]
else:
self.unresolved.append(key + ' ' + params[key])
self.cache = {} # For tunning purpose only
self.invalid_cache = {} # same but for invalid search
self.configuration_errors = []
self.configuration_warnings = []
# By default the tp is None so we know we just start
self.is_active = None
self.tags = set()
def get_name(self):
return getattr(self, 'timeperiod_name', 'unknown_timeperiod')
    # We fill in properties with template ones if needed,
    # for the unresolved values (like sunday ETCETC)
def get_unresolved_properties_by_inheritance(self, items):
# Ok, I do not have prop, Maybe my templates do?
# Same story for plus
for i in self.templates:
self.unresolved.extend(i.unresolved)
# Ok timeperiods are a bit different from classic items, because we do not have a real list
# of our raw properties, like if we got february 1 - 15 / 3 for example
def get_raw_import_values(self):
properties = ['timeperiod_name', 'alias', 'use', 'register']
r = {}
for prop in properties:
if hasattr(self, prop):
v = getattr(self, prop)
print prop, ":", v
r[prop] = v
        # Now the unresolved ones. The only way to get rid of same-key collisions is to put
        # the full value directly as the key
for other in self.unresolved:
r[other] = ''
return r
def is_time_valid(self, t):
if self.has('exclude'):
for dr in self.exclude:
if dr.is_time_valid(t):
return False
for dr in self.dateranges:
if dr.is_time_valid(t):
return True
return False
# will give the first time > t which is valid
def get_min_from_t(self, t):
mins_incl = []
for dr in self.dateranges:
mins_incl.append(dr.get_min_from_t(t))
return min(mins_incl)
# will give the first time > t which is not valid
def get_not_in_min_from_t(self, f):
pass
def find_next_valid_time_from_cache(self, t):
try:
return self.cache[t]
except KeyError:
return None
def find_next_invalid_time_from_cache(self, t):
try:
return self.invalid_cache[t]
except KeyError:
return None
# will look for active/un-active change. And log it
# [1327392000] TIMEPERIOD TRANSITION: <name>;<from>;<to>
# from is -1 on startup. to is 1 if the timeperiod starts
# and 0 if it ends.
def check_and_log_activation_change(self):
now = int(time.time())
was_active = self.is_active
self.is_active = self.is_time_valid(now)
# If we got a change, log it!
if self.is_active != was_active:
_from = 0
_to = 0
# If it's the start, get a special value for was
if was_active is None:
_from = -1
if was_active:
_from = 1
if self.is_active:
_to = 1
# Now raise the log
naglog_result('info', 'TIMEPERIOD TRANSITION: %s;%d;%d'
% (self.get_name(), _from, _to))
# clean the get_next_valid_time_from_t cache
# The entries are a dict on t. t < now are useless
# Because we do not care about past anymore.
# If not, it's not important, it's just a cache after all :)
def clean_cache(self):
now = int(time.time())
t_to_del = []
for t in self.cache:
if t < now:
t_to_del.append(t)
for t in t_to_del:
del self.cache[t]
# same for the invalid cache
t_to_del = []
for t in self.invalid_cache:
if t < now:
t_to_del.append(t)
for t in t_to_del:
del self.invalid_cache[t]
def get_next_valid_time_from_t(self, t):
# first find from cache
t = int(t)
original_t = t
#logger.debug("[%s] Check valid time for %s" % ( self.get_name(), time.asctime(time.localtime(t)))
res_from_cache = self.find_next_valid_time_from_cache(t)
if res_from_cache is not None:
return res_from_cache
still_loop = True
# Loop for all minutes...
while still_loop:
local_min = None
# Ok, not in cache...
dr_mins = []
s_dr_mins = []
for dr in self.dateranges:
dr_mins.append(dr.get_next_valid_time_from_t(t))
s_dr_mins = sorted([d for d in dr_mins if d is not None])
for t1 in s_dr_mins:
if not self.exclude and still_loop is True:
# No Exclude so we are good
local_min = t1
still_loop = False
else:
for tp in self.exclude:
if not tp.is_time_valid(t1) and still_loop is True:
# OK we found a date that is not valid in any exclude timeperiod
local_min = t1
still_loop = False
if local_min is None:
# print "Looking for next valid date"
exc_mins = []
if s_dr_mins != []:
for tp in self.exclude:
exc_mins.append(tp.get_next_invalid_time_from_t(s_dr_mins[0]))
s_exc_mins = sorted([d for d in exc_mins if d is not None])
if s_exc_mins != []:
local_min = s_exc_mins[0]
if local_min is None:
still_loop = False
else:
t = local_min
# No loop more than one year
if t > original_t + 3600*24*366 + 1:
still_loop = False
local_min = None
# Ok, we update the cache...
self.cache[original_t] = local_min
return local_min
def get_next_invalid_time_from_t(self, t):
#print '\n\n', self.get_name(), 'Search for next invalid from', time.asctime(time.localtime(t)), t
t = int(t)
original_t = t
still_loop = True
# First try to find in cache
res_from_cache = self.find_next_invalid_time_from_cache(t)
if res_from_cache is not None:
return res_from_cache
# Then look, maybe t is already invalid
if not self.is_time_valid(t):
return t
local_min = t
res = None
# Loop for all minutes...
while still_loop:
#print "Invalid loop with", time.asctime(time.localtime(local_min))
dr_mins = []
#val_valids = []
#val_inval = []
# But maybe we can find a better solution with next invalid of standard dateranges
#print self.get_name(), "After valid of exclude, local_min =", time.asctime(time.localtime(local_min))
for dr in self.dateranges:
#print self.get_name(), "Search a next invalid from DR", time.asctime(time.localtime(local_min))
#print dr.__dict__
m = dr.get_next_invalid_time_from_t(local_min)
#print self.get_name(), "Dr", dr.__dict__, "give me next invalid", time.asctime(time.localtime(m))
if m is not None:
# But maybe it's invalid for this dr, but valid for other ones.
#if not self.is_time_valid(m):
# print "Final: Got a next invalid at", time.asctime(time.localtime(m))
dr_mins.append(m)
#if not self.is_time_valid(m):
# val_inval.append(m)
#else:
# val_valids.append(m)
# print "Add a m", time.asctime(time.localtime(m))
#else:
# print dr.__dict__
# print "FUCK bad result\n\n\n"
#print "Inval"
#for v in val_inval:
# print "\t", time.asctime(time.localtime(v))
#print "Valid"
#for v in val_valids:
# print "\t", time.asctime(time.localtime(v))
if dr_mins != []:
local_min = min(dr_mins)
# Take the minimum valid as lower for next search
#local_min_valid = 0
#if val_valids != []:
# local_min_valid = min(val_valids)
#if local_min_valid != 0:
# local_min = local_min_valid
#else:
# local_min = min(dr_mins)
#print "UPDATE After dr: found invalid local min:", time.asctime(time.localtime(local_min)), "is valid", self.is_time_valid(local_min)
#print self.get_name(), 'Invalid: local min', local_min #time.asctime(time.localtime(local_min))
# We do not loop unless the local_min is not valid
if not self.is_time_valid(local_min):
still_loop = False
else: # continue until we reach too far..., in one minute
# After one month, go quicker...
if local_min > original_t + 3600*24*30:
local_min += 3600
else: # else search for 1min precision
local_min += 60
# after one year, stop.
if local_min > original_t + 3600*24*366 + 1: # 60*24*366 + 1:
still_loop = False
#print "Loop?", still_loop
# if we've got a real value, we check it with the exclude
if local_min is not None:
# Now check if local_min is not valid
for tp in self.exclude:
#print self.get_name(),"we check for invalid", time.asctime(time.localtime(local_min)), 'with tp', tp.name
if tp.is_time_valid(local_min):
still_loop = True
# local_min + 60
local_min = tp.get_next_invalid_time_from_t(local_min+60)
# No loop more than one year
if local_min > original_t + 60*24*366 + 1:
still_loop = False
res = None
if not still_loop: # We find a possible value
# We take the result the minimal possible
if res is None or local_min < res:
res = local_min
#print "Finished Return the next invalid", time.asctime(time.localtime(local_min))
# Ok, we update the cache...
self.invalid_cache[original_t] = local_min
return local_min
def has(self, prop):
return hasattr(self, prop)
    # We are correct only if our dateranges are correct
    # and if we have no unmatched entries
def is_correct(self):
b = True
for dr in self.dateranges:
d = dr.is_correct()
if not d:
logger.error("[timeperiod::%s] invalid daterange ", self.get_name())
b &= d
# Warn about non correct entries
for e in self.invalid_entries:
logger.warning("[timeperiod::%s] invalid entry '%s'", self.get_name(), e)
return b
def __str__(self):
s = ''
s += str(self.__dict__) + '\n'
for elt in self.dateranges:
s += str(elt)
(start, end) = elt.get_start_and_end_time()
start = time.asctime(time.localtime(start))
end = time.asctime(time.localtime(end))
s += "\nStart and end:" + str((start, end))
s += '\nExclude'
for elt in self.exclude:
s += str(elt)
return s
def resolve_daterange(self, dateranges, entry):
#print "Trying to resolve ", entry
res = re.search('(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2}) / (\d+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
#print "Good catch 1"
(syear, smon, smday, eyear, emon, emday, skip_interval, other) = res.groups()
dateranges.append(CalendarDaterange(syear, smon, smday, 0, 0, eyear, emon, emday, 0, 0, skip_interval, other))
return
res = re.search('(\d{4})-(\d{2})-(\d{2}) / (\d+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
#print "Good catch 2"
(syear, smon, smday, skip_interval, other) = res.groups()
eyear = syear
emon = smon
emday = smday
dateranges.append(CalendarDaterange(syear, smon, smday, 0, 0, eyear, emon, emday, 0, 0, skip_interval, other))
return
res = re.search('(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2})[\s\t]*([0-9:, -]+)', entry)
if res is not None:
#print "Good catch 3"
(syear, smon, smday, eyear, emon, emday, other) = res.groups()
dateranges.append(CalendarDaterange(syear, smon, smday, 0, 0, eyear, emon, emday, 0, 0, 0, other))
return
res = re.search('(\d{4})-(\d{2})-(\d{2})[\s\t]*([0-9:, -]+)', entry)
if res is not None:
#print "Good catch 4"
(syear, smon, smday, other) = res.groups()
eyear = syear
emon = smon
emday = smday
dateranges.append(CalendarDaterange(syear, smon, smday, 0, 0, eyear, emon, emday, 0, 0, 0, other))
return
res = re.search('([a-z]*) ([\d-]+) ([a-z]*) - ([a-z]*) ([\d-]+) ([a-z]*) / (\d+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
#print "Good catch 5"
(swday, swday_offset, smon, ewday, ewday_offset, emon, skip_interval, other) = res.groups()
dateranges.append(MonthWeekDayDaterange(0, smon, 0, swday, swday_offset, 0, emon, 0, ewday, ewday_offset, skip_interval, other))
return
res = re.search('([a-z]*) ([\d-]+) - ([a-z]*) ([\d-]+) / (\d+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
#print "Good catch 6"
(t0, smday, t1, emday, skip_interval, other) = res.groups()
if t0 in Daterange.weekdays and t1 in Daterange.weekdays:
swday = t0
ewday = t1
swday_offset = smday
ewday_offset = emday
dateranges.append(WeekDayDaterange(0, 0, 0, swday, swday_offset, 0, 0, 0, ewday, ewday_offset, skip_interval, other))
return
elif t0 in Daterange.months and t1 in Daterange.months:
smon = t0
emon = t1
dateranges.append(MonthDateDaterange(0, smon, smday, 0, 0, 0, emon, emday, 0, 0, skip_interval, other))
return
elif t0 == 'day' and t1 == 'day':
dateranges.append(MonthDayDaterange(0, 0, smday, 0, 0, 0, 0, emday, 0, 0, skip_interval, other))
return
res = re.search('([a-z]*) ([\d-]+) - ([\d-]+) / (\d+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
#print "Good catch 7"
(t0, smday, emday, skip_interval, other) = res.groups()
if t0 in Daterange.weekdays:
swday = t0
swday_offset = smday
ewday = swday
ewday_offset = emday
dateranges.append(WeekDayDaterange(0, 0, 0, swday, swday_offset, 0, 0, 0, ewday, ewday_offset, skip_interval, other))
return
elif t0 in Daterange.months:
smon = t0
emon = smon
dateranges.append(MonthDateDaterange(0, smon, smday, 0, 0, 0, emon, emday, 0, 0, skip_interval, other))
return
elif t0 == 'day':
dateranges.append(MonthDayDaterange(0, 0, smday, 0, 0, 0, 0, emday, 0, 0, skip_interval, other))
return
res = re.search('([a-z]*) ([\d-]+) ([a-z]*) - ([a-z]*) ([\d-]+) ([a-z]*) [\s\t]*([0-9:, -]+)', entry)
if res is not None:
#print "Good catch 8"
(swday, swday_offset, smon, ewday, ewday_offset, emon, other) = res.groups()
#print "Debug:", (swday, swday_offset, smon, ewday, ewday_offset, emon, other)
dateranges.append(MonthWeekDayDaterange(0, smon, 0, swday, swday_offset, 0, emon, 0, ewday, ewday_offset, 0, other))
return
res = re.search('([a-z]*) ([\d-]+) - ([\d-]+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
#print "Good catch 9"
(t0, smday, emday, other) = res.groups()
if t0 in Daterange.weekdays:
swday = t0
swday_offset = smday
ewday = swday
ewday_offset = emday
dateranges.append(WeekDayDaterange(0, 0, 0, swday, swday_offset, 0, 0, 0, ewday, ewday_offset, 0, other))
return
elif t0 in Daterange.months:
smon = t0
emon = smon
dateranges.append(MonthDateDaterange(0, smon, smday, 0, 0, 0, emon, emday, 0, 0, 0, other))
return
elif t0 == 'day':
dateranges.append(MonthDayDaterange(0, 0, smday, 0, 0, 0, 0, emday, 0, 0, 0, other))
return
res = re.search('([a-z]*) ([\d-]+) - ([a-z]*) ([\d-]+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
#print "Good catch 10"
(t0, smday, t1, emday, other) = res.groups()
if t0 in Daterange.weekdays and t1 in Daterange.weekdays:
swday = t0
ewday = t1
swday_offset = smday
ewday_offset = emday
dateranges.append(WeekDayDaterange(0, 0, 0, swday, swday_offset, 0, 0, 0, ewday, ewday_offset, 0, other))
return
elif t0 in Daterange.months and t1 in Daterange.months:
smon = t0
emon = t1
dateranges.append(MonthDateDaterange(0, smon, smday, 0, 0, 0, emon, emday, 0, 0, 0, other))
return
elif t0 == 'day' and t1 == 'day':
dateranges.append(MonthDayDaterange(0, 0, smday, 0, 0, 0, 0, emday, 0, 0, 0, other))
return
res = re.search('([a-z]*) ([\d-]+) ([a-z]*)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
#print "Good catch 11"
(t0, swday_offset, t1, other) = res.groups()
if t0 in Daterange.weekdays and t1 in Daterange.months:
swday = t0
smon = t1
emon = smon
ewday = swday
ewday_offset = swday_offset
dateranges.append(MonthWeekDayDaterange(0, smon, 0, swday, swday_offset, 0, emon, 0, ewday, ewday_offset, 0, other))
return
res = re.search('([a-z]*) ([\d-]+)[\s\t]+([0-9:, -]+)', entry)
if res is not None:
#print "Good catch 12"
(t0, smday, other) = res.groups()
if t0 in Daterange.weekdays:
swday = t0
swday_offset = smday
ewday = swday
ewday_offset = swday_offset
dateranges.append(WeekDayDaterange(0, 0, 0, swday, swday_offset, 0, 0, 0, ewday, ewday_offset, 0, other))
return
if t0 in Daterange.months:
smon = t0
emon = smon
emday = smday
dateranges.append(MonthDateDaterange(0, smon, smday, 0, 0, 0, emon, emday, 0, 0, 0, other))
return
if t0 == 'day':
emday = smday
dateranges.append(MonthDayDaterange(0, 0, smday, 0, 0, 0, 0, emday, 0, 0, 0, other))
return
res = re.search('([a-z]*)[\s\t]+([0-9:, -]+)', entry)
if res is not None:
#print "Good catch 13"
(t0, other) = res.groups()
if t0 in Daterange.weekdays:
day = t0
dateranges.append(StandardDaterange(day, other))
return
logger.info("[timeentry::%s] no match for %s", self.get_name(), entry)
self.invalid_entries.append(entry)
def apply_inheritance(self):
pass
# create daterange from unresolved param
def explode(self, timeperiods):
for entry in self.unresolved:
            #print "Resolving entry", entry
self.resolve_daterange(self.dateranges, entry)
self.unresolved = []
    # Replace the timeperiod names in exclude with the actual timeperiod objects
def linkify(self, timeperiods):
new_exclude = []
if self.has('exclude') and self.exclude != []:
logger.debug("[timeentry::%s] have excluded %s", self.get_name(), self.exclude)
excluded_tps = self.exclude
#print "I will exclude from:", excluded_tps
for tp_name in excluded_tps:
tp = timeperiods.find_by_name(tp_name.strip())
if tp is not None:
new_exclude.append(tp)
else:
logger.error("[timeentry::%s] unknown %s timeperiod", self.get_name(), tp_name)
self.exclude = new_exclude
def check_exclude_rec(self):
if self.rec_tag:
logger.error("[timeentry::%s] is in a loop in exclude parameter", self.get_name())
return False
self.rec_tag = True
for tp in self.exclude:
tp.check_exclude_rec()
return True
def fill_data_brok_from(self, data, brok_type):
cls = self.__class__
# Now config properties
for prop, entry in cls.properties.items():
# Is this property intended for broking?
#if 'fill_brok' in entry:
if brok_type in entry.fill_brok:
if hasattr(self, prop):
data[prop] = getattr(self, prop)
elif entry.has_default:
data[prop] = entry.default
# Get a brok with initial status
def get_initial_status_brok(self):
cls = self.__class__
my_type = cls.my_type
data = {'id': self.id}
self.fill_data_brok_from(data, 'full_status')
b = Brok('initial_' + my_type + '_status', data)
return b
class Timeperiods(Items):
name_property = "timeperiod_name"
inner_class = Timeperiod
def explode(self):
for id in self.items:
tp = self.items[id]
tp.explode(self)
def linkify(self):
for id in self.items:
tp = self.items[id]
tp.linkify(self)
def apply_inheritance(self):
# The only interesting property to inherit is exclude
self.apply_partial_inheritance('exclude')
for i in self:
i.get_customs_properties_by_inheritance()
# And now apply inheritance for unresolved properties
# like the dateranges in fact
for tp in self:
tp.get_unresolved_properties_by_inheritance(self.items)
# check for loop in definition
def is_correct(self):
r = True
        # We do not want the same timeperiod to be exploded again and again,
        # so we tag it
for tp in self.items.values():
tp.rec_tag = False
for tp in self.items.values():
for tmp_tp in self.items.values():
tmp_tp.rec_tag = False
r &= tp.check_exclude_rec()
# We clean the tags
for tp in self.items.values():
del tp.rec_tag
        # And check that all the timeperiods are correct
for tp in self:
r &= tp.is_correct()
return r
if __name__ == '__main__':
t = Timeperiod()
test = ['1999-01-28 00:00-24:00',
'monday 3 00:00-24:00 ',
'day 2 00:00-24:00',
'february 10 00:00-24:00',
'february -1 00:00-24:00',
'friday -2 00:00-24:00',
'thursday -1 november 00:00-24:00',
'2007-01-01 - 2008-02-01 00:00-24:00',
'monday 3 - thursday 4 00:00-24:00',
'day 1 - 15 00:00-24:00',
'day 20 - -1 00:00-24:00',
'july -10 - -1 00:00-24:00',
'april 10 - may 15 00:00-24:00',
'tuesday 1 april - friday 2 may 00:00-24:00',
'2007-01-01 - 2008-02-01 / 3 00:00-24:00',
'2008-04-01 / 7 00:00-24:00',
'day 1 - 15 / 5 00:00-24:00',
'july 10 - 15 / 2 00:00-24:00',
'tuesday 1 april - friday 2 may / 6 00:00-24:00',
'tuesday 1 october - friday 2 may / 6 00:00-24:00',
'monday 3 - thursday 4 / 2 00:00-24:00',
'monday 4 - thursday 3 / 2 00:00-24:00',
'day -1 - 15 / 5 01:00-24:00,00:30-05:60',
'tuesday 00:00-24:00',
'sunday 00:00-24:00',
'saturday 03:00-24:00,00:32-01:02',
'wednesday 09:00-15:46,00:00-21:00',
'may 7 - february 2 00:00-10:00',
'day -1 - 5 00:00-10:00',
'tuesday 1 february - friday 1 may 01:00-24:00,00:30-05:60',
'december 2 - may -15 00:00-24:00',
]
for entry in test:
print "**********************"
print entry
t = Timeperiod()
t.timeperiod_name = ''
t.resolve_daterange(t.dateranges, entry)
#t.exclude = []
#t.resolve_daterange(t.exclude, 'monday 00:00-19:00')
#t.check_valid_for_today()
now = time.time()
#print "Is valid NOW?", t.is_time_valid(now)
t_next = t.get_next_valid_time_from_t(now + 5*60)
if t_next is not None:
print "Get next valid for now + 5 min ==>", time.asctime(time.localtime(t_next)), "<=="
else:
print "===> No future time!!!"
#print "End date:", t.get_end_time()
#print "Next valid", time.asctime(time.localtime(t.get_next_valid_time()))
print str(t) + '\n\n'
print "*************************************************************"
t3 = Timeperiod()
t3.timeperiod_name = 't3'
t3.resolve_daterange(t3.dateranges, 'day 1 - 10 10:30-15:00')
t3.exclude = []
t2 = Timeperiod()
t2.timeperiod_name = 't2'
t2.resolve_daterange(t2.dateranges, 'day 1 - 10 12:00-17:00')
t2.exclude = [t3]
t = Timeperiod()
t.timeperiod_name = 't'
t.resolve_daterange(t.dateranges, 'day 1 - 10 14:00-15:00')
t.exclude = [t2]
print "Mon T", str(t) + '\n\n'
t_next = t.get_next_valid_time_from_t(now)
t_no_next = t.get_next_invalid_time_from_t(now)
print "Get next valid for now ==>", time.asctime(time.localtime(t_next)), "<=="
print "Get next invalid for now ==>", time.asctime(time.localtime(t_no_next)), "<=="
| agpl-3.0 | 7,439,761,182,976,732,000 | 38.216381 | 150 | 0.508276 | false |
iglpdc/dmrg101 | dmrg101/utils/models/hubbard_model.py | 1 | 4153 | """A few convenience functions to setup the Hubbard model.
.. math::
    H=-t\sum_{i,\sigma}\left(c^{\dagger}_{i,\sigma}c_{i+1,\sigma}+
    c^{\dagger}_{i+1,\sigma}c_{i,\sigma}\right)+
    U\sum_{i}n_{i,\uparrow}n_{i,\downarrow}
"""
class HubbardModel(object):
"""Implements a few convenience functions for Hubbard model.
Does exactly that.
"""
def __init__(self):
super(HubbardModel, self).__init__()
self.U = 0.
def set_hamiltonian(self, system):
"""Sets a system Hamiltonian to the Hubbard Hamiltonian.
Does exactly this. If the system hamiltonian has some other terms on
it, there are not touched. So be sure to use this function only in
newly created `System` objects.
Parameters
----------
system : a System.
The System you want to set the Hamiltonian for.
"""
system.clear_hamiltonian()
if 'bh' in system.left_block.operators.keys():
system.add_to_hamiltonian(left_block_op='bh')
if 'bh' in system.right_block.operators.keys():
system.add_to_hamiltonian(right_block_op='bh')
system.add_to_hamiltonian('c_up', 'c_up_dag', 'id', 'id', -1.)
system.add_to_hamiltonian('c_up_dag', 'c_up', 'id', 'id', -1.)
system.add_to_hamiltonian('c_down', 'c_down_dag', 'id', 'id', -1.)
system.add_to_hamiltonian('c_down_dag', 'c_down', 'id', 'id', -1.)
system.add_to_hamiltonian('id', 'c_up', 'c_up_dag', 'id', -1.)
system.add_to_hamiltonian('id', 'c_up_dag', 'c_up', 'id', -1.)
system.add_to_hamiltonian('id', 'c_down', 'c_down_dag', 'id', -1.)
system.add_to_hamiltonian('id', 'c_down_dag', 'c_down', 'id', -1.)
system.add_to_hamiltonian('id', 'id', 'c_up', 'c_up_dag', -1.)
system.add_to_hamiltonian('id', 'id', 'c_up_dag', 'c_up', -1.)
system.add_to_hamiltonian('id', 'id', 'c_down', 'c_down_dag', -1.)
system.add_to_hamiltonian('id', 'id', 'c_down_dag', 'c_down', -1.)
system.add_to_hamiltonian('u', 'id', 'id', 'id', self.U)
system.add_to_hamiltonian('id', 'u', 'id', 'id', self.U)
system.add_to_hamiltonian('id', 'id', 'u', 'id', self.U)
system.add_to_hamiltonian('id', 'id', 'id', 'u', self.U)
def set_block_hamiltonian(self, tmp_matrix_for_bh, system):
"""Sets the block Hamiltonian to the Hubbard model block Hamiltonian.
Parameters
----------
tmp_matrix_for_bh : a numpy array of ndim = 2.
An auxiliary matrix to keep track of the result.
system : a System.
The System you want to set the Hamiltonian for.
"""
# If you have a block hamiltonian in your block, add it
if 'bh' in system.growing_block.operators.keys():
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id')
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'c_up', 'c_up_dag', -1.)
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'c_up_dag', 'c_up', -1.)
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'c_down', 'c_down_dag', -1.)
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'c_down_dag', 'c_down', -1.)
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'u', self.U)
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'u', 'id', self.U)
def set_operators_to_update(self, system):
"""Sets the operators to update to the ones for the Hubbard model.
Parameters
----------
system : a System.
The System you want to set the Hamiltonian for.
Notes
-----
The block Hamiltonian, althought needs to be updated, is treated
separately by the very functions in the `System` class.
"""
system.add_to_operators_to_update('c_up', site_op='c_up')
system.add_to_operators_to_update('c_up_dag', site_op='c_up_dag')
system.add_to_operators_to_update('c_down', site_op='c_down')
system.add_to_operators_to_update('c_down_dag', site_op='c_down_dag')
system.add_to_operators_to_update('u', site_op='u')
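# Hedged usage sketch (not part of the original module): `system` and
# `tmp_matrix_for_bh` stand for the dmrg101 System object and auxiliary matrix
# these helpers expect; the calls below only illustrate the intended order.
#
#     model = HubbardModel()
#     model.U = 4.0                        # on-site repulsion (hopping is fixed to 1)
#     model.set_hamiltonian(system)
#     model.set_block_hamiltonian(tmp_matrix_for_bh, system)
#     model.set_operators_to_update(system)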
| mit | -898,470,415,023,037,800 | 45.144444 | 87 | 0.57934 | false |
herrera-luis-alberto/UChSonicAnemometer | software/utilities.py | 1 | 2375 | #
# Copyright (C) 2013 UNIVERSIDAD DE CHILE.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Luis Alberto Herrera <[email protected]>
import nonuniform_sampled_signal
import uniform_sampled_signal
from scipy import signal as scipysignal
import numpy as np
MEASURING_DIRECTIONS = ["NORTH", ]
SAMPLES_PER_DIRECTION = 3000
EXCITATION_SAMPLES = 300
def split_signal(signal):
  """ Splits a signal into all measuring directions and aligns the zero
  timestamp to the rising edge of the excitation of the transducer. """
responses = dict()
for i in range(len(MEASURING_DIRECTIONS)):
frame = uniform_sampled_signal.UniformSampledSignal(
signal.values[i*SAMPLES_PER_DIRECTION:(i+1)*SAMPLES_PER_DIRECTION-1],
signal.sampling_rate)
threshold = 1500
start_of_response = 0
for j in range(frame.values.size):
if abs(frame.values[j]) > threshold:
start_of_response = j+EXCITATION_SAMPLES
frame.values = frame.values[start_of_response:].astype(np.float32)
frame.values.resize((SAMPLES_PER_DIRECTION, ))
frame.values *= 1.0/frame.values.max()
frame.start_timestamp = (EXCITATION_SAMPLES+2.0)/frame.sampling_rate
break
responses[MEASURING_DIRECTIONS[i]] = frame
return responses
def get_signal_envelope(signal):
envelope = np.abs(scipysignal.hilbert(signal.values))
result = uniform_sampled_signal.UniformSampledSignal(
envelope, signal.sampling_rate)
result.start_timestamp = signal.start_timestamp
return result
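# Hedged usage sketch (`captured_frame` is assumed to come from the acquisition
# code elsewhere in this project): split one frame into per-direction responses,
# then take the envelope of the NORTH response.
#
#     responses = split_signal(captured_frame)
#     north_envelope = get_signal_envelope(responses["NORTH"])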
def plot_signal_list(signals, axes, string_format):
for signal in signals:
plot_signal(signal, axes, string_format)
def plot_signal(signal, axes, string_format):
axes.plot(signal.get_timestamp_array(), signal.values, string_format)
| gpl-3.0 | -7,615,983,306,939,609,000 | 36.109375 | 77 | 0.733474 | false |
Trax-air/swagger-tester | setup.py | 1 | 1604 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pip.req import parse_requirements
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [str(i.req) for i in parse_requirements('requirements.txt', session=False)]
test_requirements = [str(i.req) for i in parse_requirements('requirements_dev.txt', session=False)]
setup(
name='swagger_tester',
version='0.2.8',
description="Automatically test your swagger API",
long_description=readme + '\n\n' + history,
author="Cyprien Guillemot",
author_email='[email protected]',
url='https://github.com/Trax-air/swagger-tester',
packages=[
'swagger_tester',
],
package_dir={'swagger_tester':
'swagger_tester'},
include_package_data=True,
setup_requires=['pytest-runner'],
install_requires=requirements,
license="MIT",
zip_safe=False,
keywords='swagger, tester, API, REST, swagger-tester',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
)
| mit | -5,706,269,082,245,072,000 | 30.45098 | 99 | 0.647756 | false |
amueller/word_cloud | examples/emoji.py | 1 | 2207 | #!/usr/bin/env python
"""
Emoji Example
===============
A simple example that shows how to include emoji. Note that this example does not seem to work on OS X, but does
work correctly in Ubuntu.
There are 3 important steps to follow to include emoji:
1) Read the text input with io.open instead of the built in open. This ensures that it is loaded as UTF-8
2) Override the regular expression used by word cloud to parse the text into words. The default expression
will only match ascii words
3) Override the default font to something that supports emoji. The included Symbola font includes black and
white outlines for most emoji. There are currently issues with the PIL/Pillow library that seem to prevent
it from functioning correctly on OS X (https://github.com/python-pillow/Pillow/issues/1774), so try this
on ubuntu if you are having problems.
"""
import io
import os
import string
from os import path
from wordcloud import WordCloud
# get data directory (using getcwd() is needed to support running example in generated IPython notebook)
d = path.dirname(__file__) if "__file__" in locals() else os.getcwd()
# It is important to use io.open to correctly load the file as UTF-8
text = io.open(path.join(d, 'happy-emoji.txt')).read()
# the regex used to detect words is a combination of normal words, ascii art, and emojis
# 2+ consecutive letters (also include apostrophes), e.x It's
normal_word = r"(?:\w[\w']+)"
# 2+ consecutive punctuations, e.x. :)
ascii_art = r"(?:[{punctuation}][{punctuation}]+)".format(punctuation=string.punctuation)
# a single character that is not alpha_numeric or other ascii printable
emoji = r"(?:[^\s])(?<![\w{ascii_printable}])".format(ascii_printable=string.printable)
regexp = r"{normal_word}|{ascii_art}|{emoji}".format(normal_word=normal_word, ascii_art=ascii_art,
emoji=emoji)
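# Illustrative only: with these three alternatives the pattern tokenizes a string
# such as "I'm happy :) \U0001F600" into "I'm", "happy", ":)" and the emoji itself.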
# Generate a word cloud image
# The Symbola font includes most emoji
font_path = path.join(d, 'fonts', 'Symbola', 'Symbola.ttf')
wc = WordCloud(font_path=font_path, regexp=regexp).generate(text)
# Display the generated image:
# the matplotlib way:
import matplotlib.pyplot as plt
plt.imshow(wc)
plt.axis("off")
plt.show()
| mit | -5,022,895,760,860,490,000 | 44.040816 | 113 | 0.725872 | false |
jrugis/cell_mesh | vox_geometry_ORIGINAL.py | 1 | 2431 | #!/usr/bin/python
import numpy as np
import libtiff as tf
import time as tm
from operator import add
import geo
fdir = "layers/"
tdir = "geometry/"
fname = "cellsN8R"
# time the code
t1 = tm.time()
# get the reduced image stack
print "load image stack"
f = tf.TIFF3D.open(fdir+fname+".tif", mode='r')
images = f.read_image() # load the image stack
f.close()
xsize = images.shape[1] # side dimension of images
ysize = images.shape[2]
print " image grid:", images.shape
# replace grey-scale values with cell number 1-7, empty space = 0
# HARD CODED GREY-SCALE VALUES !!!!
print "renumber cells"
d = {"0":0, "139":4, "162":2, "175":3, "201":7, "208":6, "222":1, "234":5}
for v in np.nditer(images, op_flags=['readwrite']): v[...] = d[str(v)]
# add top and bottom empty padding layers (full surround empty space)
print "add padding layers"
temp = np.zeros((1, xsize, ysize), dtype=np.uint8)
images = np.concatenate((temp, images, temp))
zsize = images.shape[0]
print " voxel grid:", images.shape
# adjacency label and sort
print "adjacency label and sort"
sl = [('label', 'S8'), ('hit', np.int8)] # space point label
slabels = np.zeros((xsize-1,ysize-1,zsize-1), dtype=sl) # space point array
pt = [('label', 'S8'), ('ijk', np.int16, (3))] # point label & coordinates
pnts = np.zeros(xsize*ysize*zsize, dtype=pt) # overkill size
pcnt = 0
for k in range(zsize-1):
for j in range(ysize-1):
for i in range(xsize-1):
p8 = images[k:k+2,j:j+2,i:i+2] # (2x2x2) voxel kernel
vals = np.unique(p8) # what cells are in this 8 voxel kernel?
slabels['label'][i,j,k] = ''.join(map(str,vals))
if (vals.shape[0] == 1) and (vals[0] == 0): continue # skip empty
pnts[pcnt] = (''.join(map(str,vals)), (i,j,k)) # store label & point indices
pcnt += 1
pnts = pnts[0:pcnt] # downsize array
pnts.sort(order=['label']) # sort array
# output vtk data files
print "output vtk data files"
current = pnts['label'][0]
lcnt = 0
for i, p in enumerate(pnts):
if p['label'] == current: lcnt += 1
else:
geo.save_vtk(tdir, current, pnts['ijk'][i-lcnt:i], [xsize, ysize,zsize])
lcnt = 1
current = p['label']
geo.save_vtk(tdir, current, pnts['ijk'][i-lcnt:i], [xsize, ysize,zsize]) # one more time
# output gmsh geo file
##print "output gmsh geo file"
##geo.save_geo(tdir+"cells.geo", [xsize,ysize,zsize], pnts, slabels)
# execution time
print("%dsec" % (tm.time() - t1))
print
| gpl-3.0 | 4,789,773,116,387,446,000 | 30.166667 | 88 | 0.643357 | false |
DuCalixte/stock_scraper | simple_cgi_python_server.py | 1 | 2887 | #!/usr/bin/python
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from os import curdir, sep
import sys, getopt
import cgi
class ServerHandler(BaseHTTPRequestHandler):
# Handler for the GET requests
def do_GET(self):
if self.path=="/":
self.path="/app/views/index.html"
try:
# Check the file extension required and set the right mime type
sendReply = False
if self.path.endswith(".html"):
mimetype='text/html'
sendReply = True
if self.path.endswith(".json"):
mimetype='text/json'
sendReply = True
if self.path.endswith(".xml"):
mimetype='text/xml'
sendReply = True
if self.path.endswith(".png"):
mimetype='image/png'
sendReply = True
if self.path.endswith(".jpg"):
mimetype='image/jpg'
sendReply = True
if self.path.endswith(".gif"):
mimetype='image/gif'
sendReply = True
if self.path.endswith(".css"):
mimetype='text/css'
sendReply = True
if self.path.endswith(".js"):
mimetype='application/javascript'
sendReply = True
if sendReply == True:
# Open the static file requested and send it
f = open(curdir + sep + self.path)
self.send_response(200)
self.send_header('Content-type',mimetype)
self.end_headers()
self.wfile.write(f.read())
f.close()
return
except IOError:
self.send_error(404,'File Not Found: %s' % self.path)
# Handler for the POST requests
def do_POST(self):
  if self.path=="/submit" or self.path=="/send":
form = cgi.FieldStorage(fp=self.rfile, headers=self.headers,
environ={'REQUEST_METHOD':'POST', 'CONTENT_TYPE':self.headers['Content-Type'],}
)
# print "Your name is: %s" % form["your_name"].value
self.send_response(200)
self.end_headers()
# self.wfile.write("Thanks %s !" % form["your_name"].value)
return
def main(argv=None):
address = '127.0.0.1'
port = 8000
if argv is not None:
try:
opts, args = getopt.getopt(argv,"hp:a:",["port=","address="])
except getopt.GetoptError:
   print 'simple_cgi_python_server.py -p <port> -a <address>'
   sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'simple_cgi_python_server.py -p <port> -a <address>'
elif opt in ("-p", "--port"):
try:
port = int(arg)
except ValueError:
print "This port [", arg, "] is incorrect, try a valid integer for port..."
sys.exit(3)
elif opt in ("-a", "--address"):
address = arg
try:
# Create a web server and define the handler to manage the incoming request
server = HTTPServer((address, port), ServerHandler)
socket_info = server.socket.getsockname()
print "Serving HTTP on", socket_info[0], "port", socket_info[1], "..."
# Wait forever for incoming htto requests
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down the web server'
server.socket.close()
if __name__ == '__main__':
main(sys.argv[1:])
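# Example invocation (values are arbitrary): serve on all interfaces, port 8080:
#     python simple_cgi_python_server.py -p 8080 -a 0.0.0.0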
| mit | -4,768,539,710,734,336,000 | 26.759615 | 83 | 0.652927 | false |
Lazza/RecuperaBit | recuperabit/utils.py | 1 | 11140 | """Collection of utility functions."""
# RecuperaBit
# Copyright 2014-2021 Andrea Lazzarotto
#
# This file is part of RecuperaBit.
#
# RecuperaBit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RecuperaBit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RecuperaBit. If not, see <http://www.gnu.org/licenses/>.
import logging
import pprint
import string
import sys
import time
import unicodedata
from .fs.constants import sector_size
printer = pprint.PrettyPrinter(indent=4)
all_chars = (chr(i) for i in range(sys.maxunicode))
unicode_printable = set(
c for c in all_chars
if not unicodedata.category(c)[0].startswith('C')
)
ascii_printable = set(string.printable[:-5])
def sectors(image, offset, size, bsize=sector_size, fill=True):
"""Read from a file descriptor."""
read = True
try:
image.seek(offset * bsize)
except (IOError, OverflowError, ValueError):
read = False
if read:
try:
dump = image.read(size * bsize)
except (IOError, MemoryError):
logging.warning(
"Cannot read sector(s). Filling with 0x00. Offset: {} Size: "
"{} Bsize: {}".format(offset, size, bsize)
)
read = False
if not read:
if fill:
dump = size * bsize * b'\x00'
else:
return None
return bytearray(dump)
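# Hedged usage sketch (the image name is hypothetical): read one 512-byte sector
# at LBA 2048 from a raw disk image, letting the helper zero-fill on read errors.
#
#     with open('disk.img', 'rb') as image_file:
#         mbr_backup = sectors(image_file, 2048, 1)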
def unixtime(dtime):
"""Convert datetime to UNIX epoch."""
if dtime is None:
return 0
try:
return time.mktime(dtime.timetuple())
except ValueError:
return 0
# format:
# [(label, (formatter, lower, higher)), ...]
def unpack(data, fmt):
"""Extract formatted information from a string of bytes."""
result = {}
for label, description in fmt:
formatter, lower, higher = description
# If lower is a function, then apply it
low = lower(result) if callable(lower) else lower
high = higher(result) if callable(higher) else higher
if low is None or high is None:
result[label] = None
continue
if callable(formatter):
result[label] = formatter(data[low:high+1])
else:
if formatter == 's':
result[label] = str(data[low:high+1])
if formatter.startswith('utf'):
result[label] = data[low:high+1].decode(formatter)
if formatter.endswith('i') and len(formatter) < 4:
# Use little-endian by default. Big-endian with >i.
# Force sign-extension of first bit with >+i / +i.
chunk = data[low:high+1]
signed = False
if '+' in formatter:
signed = True
byteorder = 'little'
if formatter.startswith('>'):
byteorder = 'big'
if len(chunk):
result[label] = int.from_bytes(chunk, byteorder=byteorder, signed=signed)
else:
result[label] = None
return result
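# Hedged illustration (not part of the original module): the two-field format
# below is made up purely to show how (label, (formatter, lower, higher)) tuples
# drive unpack() over a little-endian byte buffer.
def _unpack_example():
    """Decode a tiny fabricated structure; returns {'flag': 1, 'name': 'ABCD'}."""
    data = b'\x01\x00ABCD'
    fmt = (
        ('flag', ('i', 0, 1)),      # bytes 0-1 as a little-endian integer
        ('name', ('utf-8', 2, 5)),  # bytes 2-5 decoded as UTF-8 text
    )
    return unpack(data, fmt)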
def feed_all(image, scanners, indexes):
# Scan the disk image and feed the scanners
interesting = []
for index in indexes:
sector = sectors(image, index, 1, fill=False)
if not sector:
break
for instance in scanners:
res = instance.feed(index, sector)
if res is not None:
logging.info('Found {} at sector {}'.format(res, index))
interesting.append(index)
return interesting
def printable(text, default='.', alphabet=None):
"""Replace unprintable characters in a text with a default one."""
if alphabet is None:
alphabet = unicode_printable
return ''.join((i if i in alphabet else default) for i in text)
def pretty(dictionary):
"""Format dictionary with the pretty printer."""
return printer.pformat(dictionary)
def show(dictionary):
"""Print dictionary with the pretty printer."""
printer.pprint(dictionary)
def tiny_repr(element):
"""deprecated: Return a representation of unicode strings without the u."""
rep = repr(element)
return rep[1:] if type(element) == unicode else rep
def readable_bytes(amount):
"""Return a human readable string representing a size in bytes."""
if amount is None:
return '??? B'
if amount < 1:
return '%.2f B' % amount
powers = {
0: '', 1: 'K', 2: 'M', 3: 'G', 4: 'T'
}
biggest = max(i for i in powers if amount / 1024.**i >= 1)
scaled = amount / 1024.**biggest
return '%.2f %sB' % (scaled, powers[biggest])
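# For instance, readable_bytes(1536) returns '1.50 KB', readable_bytes(0.5)
# returns '0.50 B' and readable_bytes(None) returns the placeholder '??? B'.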
def _file_tree_repr(node):
"""Give a nice representation for the tree."""
desc = (
' [GHOST]' if node.is_ghost else
' [DELETED]' if node.is_deleted else ''
)
tail = '/' if node.is_directory else ''
data = [
('Id', node.index),
('Offset', node.offset),
(
'Offset bytes',
node.offset * sector_size
if node.offset is not None else None
)
# ('MAC', node.mac)
]
if not node.is_directory:
data += [('Size', readable_bytes(node.size))]
return u'%s%s (%s) %s' % (
node.name, tail, ', '.join(a + ': ' + str(b) for a, b in data), desc
)
def tree_folder(directory, padding=0):
"""Return a tree-like textual representation of a directory."""
lines = []
pad = ' ' * padding
lines.append(
pad + _file_tree_repr(directory)
)
padding = padding + 2
pad = ' ' * padding
for entry in directory.children:
if len(entry.children) or entry.is_directory:
lines.append(tree_folder(entry, padding))
else:
lines.append(
pad + _file_tree_repr(entry)
)
return '\n'.join(lines)
def _bodyfile_repr(node, path):
"""Return a body file line for node."""
end = '/' if node.is_directory or len(node.children) else ''
return '|'.join(str(el) for el in [
'0', # MD5
path + node.name + end, # name
node.index, # inode
'0', '0', '0', # mode, UID, GID
node.size if node.size is not None else 0,
unixtime(node.mac['access']),
unixtime(node.mac['modification']),
unixtime(node.mac['creation']),
'0'
])
def bodyfile_folder(directory, path=''):
"""Create a body file compatible with TSK 3.x.
Format:
'#MD5|name|inode|mode_as_string|UID|GID|size|atime|mtime|ctime|crtime'
See also: http://wiki.sleuthkit.org/index.php?title=Body_file"""
lines = [_bodyfile_repr(directory, path)]
path += directory.name + '/'
for entry in directory.children:
if len(entry.children) or entry.is_directory:
lines += bodyfile_folder(entry, path)
else:
lines.append(_bodyfile_repr(entry, path))
return lines
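# Illustrative line (hypothetical values): an entry emitted by _bodyfile_repr
# looks like "0|/Root/docs/report.txt|42|0|0|0|1024|1467072000|1467072000|1467072000|0",
# following the body file layout documented above.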
def _ltx_clean(label):
"""Small filter to prepare strings to be included in LaTeX code."""
clean = str(label).replace('$', r'\$').replace('_', r'\_')
if clean[0] == '-':
clean = r'\textminus{}' + clean[1:]
return clean
def _tikz_repr(node):
"""Represent the node for a Tikz diagram."""
return r'node %s{%s\enskip{}%s}' % (
'[ghost]' if node.is_ghost else '[deleted]' if node.is_deleted else '',
_ltx_clean(node.index), _ltx_clean(node.name)
)
def tikz_child(directory, padding=0):
"""Write a child row for Tikz representation."""
pad = ' ' * padding
lines = [r'%schild {%s' % (pad, _tikz_repr(directory))]
count = len(directory.children)
for entry in directory.children:
content, number = tikz_child(entry, padding+4)
lines.append(content)
count += number
lines.append('}')
for entry in range(count):
lines.append('child [missing] {}')
return '\n'.join(lines).replace('\n}', '}'), count
def tikz_part(part):
"""Create LaTeX code to represent the directory structure as a nice Tikz
diagram.
See also: http://www.texample.net/tikz/examples/filesystem-tree/"""
preamble = (r"""%\usepackage{tikz}
%\usetikzlibrary{trees}""")
begin_tree = r"""\begin{tikzpicture}[%
grow via three points={one child at (1.75em,-1.75em) and
two children at (1.75em,-1.75em) and (1.75em,-3.5em)},
edge from parent path={(\tikzparentnode.south) |- (\tikzchildnode.west)}]
\scriptsize
"""
end_tree = r"""\end{tikzpicture}"""
lines = [r'\node [root] {File System Structure}']
lines += [tikz_child(entry, 4)[0] for entry in (part.root, part.lost)]
lines.append(';')
return '%s\n\n%s\n%s\n%s' % (
preamble, begin_tree, '\n'.join(lines), end_tree
)
def csv_part(part):
"""Provide a CSV representation for a partition."""
contents = [
','.join(('Id', 'Parent', 'Name', 'Modification Time',
'Access Time', 'Creation Time', 'Size (bytes)',
'Size (human)', 'Offset (bytes)', 'Offset (sectors)',
'Directory', 'Deleted', 'Ghost'))
]
for index in part.files:
obj = part.files[index]
contents.append(
u'%s,%s,"%s",%s,%s,%s,%s,%s,%s,%s,%s,%s,%s' % (
obj.index, obj.parent, obj.name,
obj.mac['modification'], obj.mac['access'],
obj.mac['creation'], obj.size,
readable_bytes(obj.size),
(obj.offset * sector_size
if obj.offset is not None else None),
obj.offset,
'1' if obj.is_directory else '',
'1' if obj.is_deleted else '',
'1' if obj.is_ghost else ''
)
)
return contents
def _sub_locate(text, directory, part):
"""Helper for locate."""
lines = []
for entry in sorted(directory.children, key=lambda node: node.name):
path = entry.full_path(part)
if text in path.lower():
lines.append((entry, path))
if len(entry.children) or entry.is_directory:
lines += _sub_locate(text, entry, part)
return lines
def locate(part, text):
"""Return paths of files matching the text."""
lines = []
text = text.lower()
lines += _sub_locate(text, part.lost, part)
lines += _sub_locate(text, part.root, part)
return lines
def merge(part, piece):
"""Merge piece into part (both are partitions)."""
for index in piece.files:
if (
index not in part.files or
part.files[index].is_ghost
):
part.add_file(piece.files[index])
| gpl-3.0 | -7,770,371,724,455,393,000 | 30.468927 | 93 | 0.572801 | false |
ahmetcemturan/SFACT | skeinforge_application/skeinforge_utilities/skeinforge_help.py | 1 | 3725 | """
Help has buttons and menu items to open help, blog and forum pages in your primary browser.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities import archive
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_profile
__author__ = 'Enrique Perez ([email protected])'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getNewRepository():
'Get new repository.'
return HelpRepository()
class HelpRepository:
"A class to handle the help settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_utilities.skeinforge_help.html', self)
announcementsText = '- Announcements - '
announcementsLabel = settings.LabelDisplay().getFromName(announcementsText, self )
announcementsLabel.columnspan = 6
settings.LabelDisplay().getFromName('Fabmetheus Blog, Announcements & Questions:', self )
settings.HelpPage().getFromNameAfterHTTP('fabmetheus.blogspot.com/', 'Fabmetheus Blog', self )
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Documentation -', self )
settings.LabelDisplay().getFromName('Local Documentation Table of Contents: ', self )
settings.HelpPage().getFromNameSubName('Contents', self, 'contents.html')
settings.LabelDisplay().getFromName('Wiki Manual with Pictures & Charts: ', self )
settings.HelpPage().getFromNameAfterHTTP('fabmetheus.crsndoo.com/wiki/index.php/Skeinforge', 'Wiki Manual', self )
settings.LabelDisplay().getFromName('Skeinforge Overview: ', self )
settings.HelpPage().getFromNameSubName('Skeinforge Overview', self, 'skeinforge_application.skeinforge.html')
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('SFACT FAQs:', self)
settings.HelpPage().getFromNameAfterHTTP('http://titanpad.com/XTUJXiNHmd', ' SFACT FAQs ', self )
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Search -', self )
settings.LabelDisplay().getFromName('Reprap Search:', self )
settings.HelpPage().getFromNameAfterHTTP('members.axion.net/~enrique/search_reprap.html', 'Reprap Search', self )
settings.LabelDisplay().getFromName('Skeinforge Search:', self )
settings.HelpPage().getFromNameAfterHTTP('members.axion.net/~enrique/search_skeinforge.html', 'Skeinforge Search', self )
settings.LabelDisplay().getFromName('Web Search:', self )
settings.HelpPage().getFromNameAfterHTTP('members.axion.net/~enrique/search_web.html', 'Web Search', self )
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Troubleshooting -', self )
settings.LabelDisplay().getFromName('Skeinforge Forum:', self)
settings.HelpPage().getFromNameAfterHTTP('forums.reprap.org/list.php?154', ' Skeinforge Forum ', self )
settings.LabelSeparator().getFromRepository(self)
self.version = settings.LabelDisplay().getFromName('Version: ' + archive.getFileText(archive.getVersionFileName()), self)
self.wikiManualPrimary = settings.BooleanSetting().getFromValue('Wiki Manual Primary', self, True )
self.wikiManualPrimary.setUpdateFunction( self.save )
def save(self):
"Write the entities."
settings.writeSettingsPrintMessage(self)
| agpl-3.0 | -1,454,038,206,393,969,700 | 56.307692 | 163 | 0.743893 | false |
tivek/conan | conans/test/integration/symlinks_test.py | 1 | 6005 | import unittest
from conans.test.utils.tools import TestClient, TestServer
from conans.util.files import load, save
from conans.model.ref import PackageReference, ConanFileReference
import os
import platform
conanfile = """
from conans import ConanFile
from conans.util.files import save
import os
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def build(self):
save("file1.txt", "Hello1")
os.symlink("file1.txt", "file1.txt.1")
save("version1/file2.txt", "Hello2")
os.symlink("version1", "latest")
def package(self):
self.copy("*.txt*", links=True)
self.copy("*.so*", links=True)
"""
test_conanfile = """[requires]
Hello/0.1@lasote/stable
[imports]
., * -> .
"""
class SymLinksTest(unittest.TestCase):
def _check(self, client, ref, build=True):
folders = [client.paths.package(ref), client.current_folder]
if build:
folders.append(client.paths.build(ref))
for base in folders:
filepath = os.path.join(base, "file1.txt")
link = os.path.join(base, "file1.txt.1")
self.assertEqual(os.readlink(link), "file1.txt")
file1 = load(filepath)
self.assertEqual("Hello1", file1)
file1 = load(link)
self.assertEqual("Hello1", file1)
# Save any different string, random, or the base path
save(filepath, base)
self.assertEqual(load(link), base)
filepath = os.path.join(base, "version1")
link = os.path.join(base, "latest")
self.assertEqual(os.readlink(link), "version1")
filepath = os.path.join(base, "latest/file2.txt")
file1 = load(filepath)
self.assertEqual("Hello2", file1)
def basic_test(self):
if platform.system() == "Windows":
return
client = TestClient()
client.save({"conanfile.py": conanfile,
"conanfile.txt": test_conanfile})
client.run("export . lasote/stable")
client.run("install conanfile.txt --build")
ref = PackageReference.loads("Hello/0.1@lasote/stable:"
"5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9")
self._check(client, ref)
client.run("install conanfile.txt --build")
self._check(client, ref)
def package_files_test(self):
if platform.system() == "Windows":
return
client = TestClient()
conanfile = """
from conans import ConanFile
class TestConan(ConanFile):
name = "Hello"
version = "0.1"
def package(self):
self.copy("*", symlinks=True)
"""
client.save({"recipe/conanfile.py": conanfile})
file1 = os.path.join(client.current_folder, "file1.txt")
file2 = os.path.join(client.current_folder, "version1/file2.txt")
file11 = os.path.join(client.current_folder, "file1.txt.1")
latest = os.path.join(client.current_folder, "latest")
save(file1, "Hello1")
os.symlink("file1.txt", file11)
save(file2, "Hello2")
os.symlink("version1", latest)
client.run("export-pkg ./recipe Hello/0.1@lasote/stable")
ref = PackageReference.loads("Hello/0.1@lasote/stable:"
"5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9")
self._check(client, ref, build=False)
def export_and_copy_test(self):
if platform.system() == "Windows":
return
lib_name = "libtest.so.2"
lib_contents = "TestLib"
link_name = "libtest.so"
client = TestClient()
client.save({"conanfile.py": conanfile,
"conanfile.txt": test_conanfile,
lib_name: lib_contents})
pre_export_link = os.path.join(client.current_folder, link_name)
os.symlink(lib_name, pre_export_link)
client.run("export . lasote/stable")
client.run("install conanfile.txt --build")
client.run("copy Hello/0.1@lasote/stable team/testing --all")
conan_ref = ConanFileReference.loads("Hello/0.1@lasote/stable")
team_ref = ConanFileReference.loads("Hello/0.1@team/testing")
package_ref = PackageReference(conan_ref,
"5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9")
team_package_ref = PackageReference(team_ref,
"5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9")
for folder in [client.paths.export(conan_ref), client.paths.source(conan_ref),
client.paths.build(package_ref), client.paths.package(package_ref),
client.paths.export(team_ref), client.paths.package(team_package_ref)]:
exported_lib = os.path.join(folder, lib_name)
exported_link = os.path.join(folder, link_name)
self.assertEqual(os.readlink(exported_link), lib_name)
self.assertEqual(load(exported_lib), load(exported_link))
self.assertTrue(os.path.islink(exported_link))
self._check(client, package_ref)
def upload_test(self):
if platform.system() == "Windows":
return
test_server = TestServer()
servers = {"default": test_server}
client = TestClient(servers=servers, users={"default": [("lasote", "mypass")]})
client.save({"conanfile.py": conanfile,
"conanfile.txt": test_conanfile})
client.run("export . lasote/stable")
client.run("install conanfile.txt --build")
ref = PackageReference.loads("Hello/0.1@lasote/stable:"
"5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9")
client.run("upload Hello/0.1@lasote/stable --all")
client.run('remove "*" -f')
client.save({"conanfile.txt": test_conanfile}, clean_first=True)
client.run("install conanfile.txt")
self._check(client, ref, build=False)
| mit | -6,822,026,028,091,206,000 | 35.174699 | 94 | 0.594005 | false |
Diyago/Machine-Learning-scripts | DEEP LEARNING/Pytorch from scratch/word2vec-embeddings/utils.py | 1 | 1512 | import re
from collections import Counter
def preprocess(text):
# Replace punctuation with tokens so we can use them in our model
text = text.lower()
text = text.replace(".", " <PERIOD> ")
text = text.replace(",", " <COMMA> ")
text = text.replace('"', " <QUOTATION_MARK> ")
text = text.replace(";", " <SEMICOLON> ")
text = text.replace("!", " <EXCLAMATION_MARK> ")
text = text.replace("?", " <QUESTION_MARK> ")
text = text.replace("(", " <LEFT_PAREN> ")
text = text.replace(")", " <RIGHT_PAREN> ")
text = text.replace("--", " <HYPHENS> ")
text = text.replace("?", " <QUESTION_MARK> ")
# text = text.replace('\n', ' <NEW_LINE> ')
text = text.replace(":", " <COLON> ")
words = text.split()
# Remove all words with 5 or fewer occurences
word_counts = Counter(words)
trimmed_words = [word for word in words if word_counts[word] > 5]
return trimmed_words
def create_lookup_tables(words):
"""
Create lookup tables for vocabulary
:param words: Input list of words
:return: Two dictionaries, vocab_to_int, int_to_vocab
"""
word_counts = Counter(words)
# sorting the words from most to least frequent in text occurrence
sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
# create int_to_vocab dictionaries
int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)}
vocab_to_int = {word: ii for ii, word in int_to_vocab.items()}
return vocab_to_int, int_to_vocab
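# Hedged end-to-end sketch (the corpus is assumed to be a large text; on a short
# string the "appears more than 5 times" filter in preprocess() drops every word).
def _example_pipeline(corpus_text):
    """Tokenize a corpus and build the two vocabulary lookup dictionaries."""
    words = preprocess(corpus_text)
    vocab_to_int, int_to_vocab = create_lookup_tables(words)
    return vocab_to_int, int_to_vocab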
| apache-2.0 | 2,550,184,521,592,176,000 | 34.162791 | 73 | 0.623677 | false |
conda/kapsel | conda_kapsel/internal/test/test_plugin_html.py | 1 | 5846 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from conda_kapsel.internal.plugin_html import cleanup_and_scope_form, html_tag
import pytest
def test_html_tag():
assert "<div></div>" == html_tag("div", "")
assert "<li>foo</li>" == html_tag("li", "foo")
assert "<div><&></div>" == html_tag("div", "<&>")
def test_cleanup_and_scope_form_requires_form_tag():
original = """
<div>
<input type="text" name="foo"/>
</div>
"""
with pytest.raises(ValueError) as excinfo:
cleanup_and_scope_form(original, "prefix.", dict(foo="bar"))
assert "does not have a root <form>" in repr(excinfo.value)
def test_cleanup_and_scope_form_complains_about_missing_name(capsys):
original = """
<form>
<input type="text"/>
</form>
"""
cleanup_and_scope_form(original, "prefix.", dict(foo="bar"))
out, err = capsys.readouterr()
assert err == "No 'name' attribute set on <input type=\"text\"/>\n"
assert out == ""
def test_cleanup_and_scope_form_text_input():
original = """
<form>
<input type="text" name="foo"/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="bar"))
expected = """
<div>
<input name="prefix.foo" type="text" value="bar"/>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_multiple_text_inputs():
original = """
<form>
<input type="text" name="foo"/>
<input type="text" name="bar" value="wrong"/>
<input type="text" name="baz" value=""/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo=1, bar=2, baz=3))
expected = """
<div>
<input name="prefix.foo" type="text" value="1"/>
<input name="prefix.bar" type="text" value="2"/>
<input name="prefix.baz" type="text" value="3"/>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_missing_value():
original = """
<form>
<input type="text" name="foo"/>
</form>
"""
# we don't pass in a value for "foo", so the value attribute
# should be omitted
cleaned = cleanup_and_scope_form(original, "prefix.", dict())
expected = """
<div>
<input name="prefix.foo" type="text"/>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_textarea():
original = """
<form>
<textarea name="foo"/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="bar"))
expected = """
<div>
<textarea name="prefix.foo">bar</textarea>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_checkbox_not_checked():
original = """
<form>
<input type="checkbox" name="foo" value="not_bar"/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="bar"))
expected = """
<div>
<input name="prefix.foo" type="checkbox" value="not_bar"/>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_checkbox_checked():
original = """
<form>
<input type="checkbox" name="foo" value="bar"/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="bar"))
expected = """
<div>
<input checked="" name="prefix.foo" type="checkbox" value="bar"/>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_checkbox_checked_bool_value():
original = """
<form>
<input type="checkbox" name="foo" value="bar"/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo=True))
expected = """
<div>
<input checked="" name="prefix.foo" type="checkbox" value="bar"/>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_radio():
original = """
<form>
<input type="radio" name="foo" value="1"/>
<input type="radio" name="foo" value="2" checked/>
<input type="radio" name="foo" value="3"/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="1"))
expected = """
<div>
<input checked="" name="prefix.foo" type="radio" value="1"/>
<input name="prefix.foo" type="radio" value="2"/>
<input name="prefix.foo" type="radio" value="3"/>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_select_using_value_attribute():
original = """
<form>
<select name="foo">
<option value="1">One</option>
<option value="2" selected>Two</option>
<option value="3">Three</option>
</select>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="1"))
expected = """
<div>
<select name="prefix.foo">
<option selected="" value="1">One</option>
<option value="2">Two</option>
<option value="3">Three</option>
</select>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_select_using_element_text():
original = """
<form>
<select name="foo">
<option>1</option>
<option selected>2</option>
<option>3</option>
</select>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="1"))
expected = """
<div>
<select name="prefix.foo">
<option selected="">1</option>
<option>2</option>
<option>3</option>
</select>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_leave_hidden_alone():
original = """
<form>
<input type="hidden" name="foo" value="bar"/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="blah"))
# we should NOT set the value on a hidden
expected = """
<div>
<input name="prefix.foo" type="hidden" value="bar"/>
</div>
""".strip()
assert expected == cleaned
| bsd-3-clause | 5,754,885,811,641,007,000 | 20.809701 | 84 | 0.60479 | false |
dipanm/ytdragon | ytdragon/ytpage.py | 1 | 3399 | #!/usr/bin/python3 -u
import os
import logging
import string
import re
import urllib
from urllib import parse as urlparse
import pprint
from lxml import html
default_host = "youtube.com"
default_hurl = "https://"+default_host
skip_codes = { "#": "COMMENT", "@" : "DONE", "?" : "BAD_ITEM", "=" : "COMMAND" }
uidtype_map = { "v" : "video", "vid" : "video", "video" : "video",
"c" : "channel", "ch" : "channel", "channel" : "channel",
"u" : "user", "usr" : "user", "user" : "user",
"p" : "playlist", "pl" : "playlist", "playlist" : "playlist",
"l" : "ytlist", "yl" : "ytlist", "ytlist" : "ytlist" }
url_map = { "UNKNOWN" : "",
"ytlist" : "file://<ID>",
"video" : "/watch?v=<ID>",
"playlist": "/playlist?list=<ID>",
"user" : "/user/<ID>/videos" ,
"channel" : "/channel/<ID>/videos" }
def extract_id_q(parsed_url,query):
qdict = urlparse.parse_qs(parsed_url.query)
plid = qdict[query][0] if query in qdict else "UNKNOWN_ID"
return plid
def extract_id_p(parsed_url,pkey):
path = parsed_url.path.split('/')
i = 0
for p in path :
if pkey in p :
break;
else:
i += 1
    uid = path[i+1] if (i + 1 < len(path)) else "UNKNOWN_ID"
return uid
path_id_map ={ "watch" : { "uid_type":"video", "extract_id": extract_id_q, "key_ref": "v" },
"playlist" : { "uid_type":"playlist","extract_id": extract_id_q, "key_ref": "list" },
"user" : { "uid_type":"user", "extract_id": extract_id_p, "key_ref": "user" },
"channel" : { "uid_type":"channel", "extract_id": extract_id_p, "key_ref": "channel" }
}
def get_uid_from_ref(uid_str):
uid_type = "UNKNOWN"
uid = "UNKNOWN_ID"
if (len(uid_str) == 0 ):
return skip_codes["#"], skip_codes["#"], ""
if(uid_str[0] in skip_codes.keys()) :
status = skip_codes[uid_str[0]]
uid_str = uid_str[1:]
else:
status = "OK"
if re.match('^(http|https)://', uid_str): #and (sp_char == ""):
parsed_url = urlparse.urlparse(uid_str)
h = parsed_url.netloc
path = parsed_url.path
base_path = path.split("/")[1]
if default_host not in h:
uid_type = "UNKNOWN_HOST"
else:
if base_path in path_id_map:
uid_type = path_id_map[base_path]["uid_type"]
uid = path_id_map[base_path]["extract_id"](parsed_url,path_id_map[base_path]["key_ref"])
else:
uid_type = "UNKNOWN_PAGE"
else:
ul = uid_str.split("=",1)
uid_type = uidtype_map[ul[0]] if ul[0] in uidtype_map else "UNKNOWN_TYPE"
if len(ul) > 1 :
uid = ul[1].split("/")[0] if (uid_type != "ytlist") else ul[1]
else:
uid = "UNKNOWN_ID"
return status, uid_type, uid
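# Hedged illustration (the IDs below are made up): typical (status, uid_type, uid)
# triples returned for a full watch URL and for a short "type=id" reference.
def _example_uid_refs():
    """Parse two sample references; expected triples are noted in the comments."""
    url_ref = "https://www.youtube.com/watch?v=abc123XYZ_0"
    short_ref = "pl=PLmadeUpListId"
    return [
        get_uid_from_ref(url_ref),    # ('OK', 'video', 'abc123XYZ_0')
        get_uid_from_ref(short_ref),  # ('OK', 'playlist', 'PLmadeUpListId')
    ]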
def get_plid_from_url(string):
if(string[0] == '?'):
return '?'
if re.match('^(http|https)://', string):
url = string
para = url.split('?')[1].split(",")
for p in para:
key, value = p.split("=")
if(key == 'list'):
plid = value
else:
plid = string
return plid
def get_page(pagetype,uid):
page = { 'code' : -1, 'contents' : ""}
if(pagetype == "ytlist"):
url = "file://"+os.path.abspath(uid)
else:
url = default_hurl+url_map[pagetype].replace("<ID>",uid)
response = urllib.request.urlopen(url)
page['uid'] = uid
page['url'] = url
page['code'] = response.getcode()
page['contents'] = response.read().decode('utf-8')
page['len'] = len(page['contents'])
return page
| gpl-3.0 | -4,245,832,291,803,000,300 | 26.634146 | 104 | 0.559576 | false |
rahulunair/nova | nova/tests/unit/api/openstack/compute/test_server_groups.py | 1 | 37968 | # Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
import six
import webob
from nova.api.openstack import api_version_request as avr
from nova.api.openstack.compute import server_groups as sg_v21
from nova import context
from nova import exception
from nova import objects
from nova.policies import server_groups as sg_policies
from nova import test
from nova.tests import fixtures
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import policy_fixture
class AttrDict(dict):
def __getattr__(self, k):
return self[k]
def server_group_template(**kwargs):
sgroup = kwargs.copy()
sgroup.setdefault('name', 'test')
return sgroup
def server_group_resp_template(**kwargs):
sgroup = kwargs.copy()
sgroup.setdefault('name', 'test')
if 'policy' not in kwargs:
sgroup.setdefault('policies', [])
sgroup.setdefault('members', [])
return sgroup
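# Convert an API-style server group dict into the attribute dict that an
# InstanceGroup DB object expects (uuid, policy/member defaults, timestamps).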
def server_group_db(sg):
attrs = copy.deepcopy(sg)
if 'id' in attrs:
attrs['uuid'] = attrs.pop('id')
if 'policies' in attrs:
policies = attrs.pop('policies')
attrs['policies'] = policies
else:
attrs['policies'] = []
if 'policy' in attrs:
del attrs['policies']
if 'members' in attrs:
members = attrs.pop('members')
attrs['members'] = members
else:
attrs['members'] = []
attrs['deleted'] = 0
attrs['deleted_at'] = None
attrs['created_at'] = None
attrs['updated_at'] = None
if 'user_id' not in attrs:
attrs['user_id'] = fakes.FAKE_USER_ID
if 'project_id' not in attrs:
attrs['project_id'] = fakes.FAKE_PROJECT_ID
attrs['id'] = 7
return AttrDict(attrs)
class ServerGroupTestV21(test.NoDBTestCase):
USES_DB_SELF = True
validation_error = exception.ValidationError
wsgi_api_version = '2.1'
def setUp(self):
super(ServerGroupTestV21, self).setUp()
self._setup_controller()
self.req = fakes.HTTPRequest.blank('')
self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.foo_req = fakes.HTTPRequest.blank('', project_id='foo')
self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(fixtures.Database(database='api'))
cells = fixtures.CellDatabases()
cells.add_cell_database(uuidsentinel.cell1)
cells.add_cell_database(uuidsentinel.cell2)
self.useFixture(cells)
ctxt = context.get_admin_context()
self.cells = {}
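        # Register a CellMapping for each fake cell so instances created in
        # the tests can be targeted to a specific cell database.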
for uuid in (uuidsentinel.cell1, uuidsentinel.cell2):
cm = objects.CellMapping(context=ctxt,
uuid=uuid,
database_connection=uuid,
transport_url=uuid)
cm.create()
self.cells[cm.uuid] = cm
def _setup_controller(self):
self.controller = sg_v21.ServerGroupController()
def test_create_server_group_with_no_policies(self):
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
sgroup = server_group_template()
sgroup['policies'] = policies
res_dict = self.controller.create(self.req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policies'], policies)
def test_create_server_group_with_new_policy_before_264(self):
req = fakes.HTTPRequest.blank('', version='2.63')
policy = 'anti-affinity'
rules = {'max_server_per_host': 3}
# 'policy' isn't an acceptable request key before 2.64
sgroup = server_group_template(policy=policy)
result = self.assertRaises(
self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
self.assertIn(
"Invalid input for field/attribute server_group",
six.text_type(result)
)
# 'rules' isn't an acceptable request key before 2.64
sgroup = server_group_template(rules=rules)
result = self.assertRaises(
self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
self.assertIn(
"Invalid input for field/attribute server_group",
six.text_type(result)
)
def test_create_server_group(self):
policies = ['affinity', 'anti-affinity']
for policy in policies:
self._create_server_group_normal(policies=[policy])
def test_create_server_group_rbac_default(self):
sgroup = server_group_template()
sgroup['policies'] = ['affinity']
# test as admin
self.controller.create(self.admin_req, body={'server_group': sgroup})
# test as non-admin
self.controller.create(self.req, body={'server_group': sgroup})
def test_create_server_group_rbac_admin_only(self):
sgroup = server_group_template()
sgroup['policies'] = ['affinity']
# override policy to restrict to admin
rule_name = sg_policies.POLICY_ROOT % 'create'
rules = {rule_name: 'is_admin:True'}
self.policy.set_rules(rules, overwrite=False)
# check for success as admin
self.controller.create(self.admin_req, body={'server_group': sgroup})
# check for failure as non-admin
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller.create, self.req,
body={'server_group': sgroup})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def _create_instance(self, ctx, cell):
with context.target_cell(ctx, cell) as cctx:
instance = objects.Instance(context=cctx,
image_ref=uuidsentinel.fake_image_ref,
node='node1', reservation_id='a',
host='host1',
project_id=fakes.FAKE_PROJECT_ID,
vm_state='fake',
system_metadata={'key': 'value'})
instance.create()
im = objects.InstanceMapping(context=ctx,
project_id=ctx.project_id,
user_id=ctx.user_id,
cell_mapping=cell,
instance_uuid=instance.uuid)
im.create()
return instance
def _create_instance_group(self, context, members):
ig = objects.InstanceGroup(context=context, name='fake_name',
user_id='fake_user', project_id=fakes.FAKE_PROJECT_ID,
members=members)
ig.create()
return ig.uuid
def _create_groups_and_instances(self, ctx):
cell1 = self.cells[uuidsentinel.cell1]
cell2 = self.cells[uuidsentinel.cell2]
instances = [self._create_instance(ctx, cell=cell1),
self._create_instance(ctx, cell=cell2),
self._create_instance(ctx, cell=None)]
members = [instance.uuid for instance in instances]
ig_uuid = self._create_instance_group(ctx, members)
return (ig_uuid, instances, members)
def _test_list_server_group_all(self, api_version='2.1'):
self._test_list_server_group(api_version=api_version,
limited='',
path='/os-server-groups?all_projects=True')
def _test_list_server_group_offset_and_limit(self, api_version='2.1'):
self._test_list_server_group(api_version=api_version,
limited='&offset=1&limit=1',
path='/os-server-groups?all_projects=True')
@mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
@mock.patch('nova.objects.InstanceGroupList.get_all')
def _test_list_server_group(self, mock_get_all, mock_get_by_project,
path, api_version='2.1', limited=None):
policies = ['anti-affinity']
policy = "anti-affinity"
members = []
metadata = {} # always empty
names = ['default-x', 'test']
p_id = fakes.FAKE_PROJECT_ID
u_id = fakes.FAKE_USER_ID
ver = avr.APIVersionRequest(api_version)
if ver >= avr.APIVersionRequest("2.64"):
sg1 = server_group_resp_template(id=uuidsentinel.sg1_id,
name=names[0],
policy=policy,
rules={},
members=members,
project_id=p_id,
user_id=u_id)
sg2 = server_group_resp_template(id=uuidsentinel.sg2_id,
name=names[1],
policy=policy,
rules={},
members=members,
project_id=p_id,
user_id=u_id)
elif ver >= avr.APIVersionRequest("2.13"):
sg1 = server_group_resp_template(id=uuidsentinel.sg1_id,
name=names[0],
policies=policies,
members=members,
metadata=metadata,
project_id=p_id,
user_id=u_id)
sg2 = server_group_resp_template(id=uuidsentinel.sg2_id,
name=names[1],
policies=policies,
members=members,
metadata=metadata,
project_id=p_id,
user_id=u_id)
else:
sg1 = server_group_resp_template(id=uuidsentinel.sg1_id,
name=names[0],
policies=policies,
members=members,
metadata=metadata)
sg2 = server_group_resp_template(id=uuidsentinel.sg2_id,
name=names[1],
policies=policies,
members=members,
metadata=metadata)
tenant_groups = [sg2]
all_groups = [sg1, sg2]
if limited:
all = {'server_groups': [sg2]}
tenant_specific = {'server_groups': []}
else:
all = {'server_groups': all_groups}
tenant_specific = {'server_groups': tenant_groups}
def return_all_server_groups():
return objects.InstanceGroupList(
objects=[objects.InstanceGroup(
**server_group_db(sg)) for sg in all_groups])
mock_get_all.return_value = return_all_server_groups()
def return_tenant_server_groups():
return objects.InstanceGroupList(
objects=[objects.InstanceGroup(
**server_group_db(sg)) for sg in tenant_groups])
mock_get_by_project.return_value = return_tenant_server_groups()
path = path or '/os-server-groups?all_projects=True'
if limited:
path += limited
req = fakes.HTTPRequest.blank(path, version=api_version)
admin_req = fakes.HTTPRequest.blank(path, use_admin_context=True,
version=api_version)
# test as admin
res_dict = self.controller.index(admin_req)
self.assertEqual(all, res_dict)
# test as non-admin
res_dict = self.controller.index(req)
self.assertEqual(tenant_specific, res_dict)
@mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
def _test_list_server_group_by_tenant(self, mock_get_by_project,
api_version='2.1'):
policies = ['anti-affinity']
members = []
metadata = {} # always empty
names = ['default-x', 'test']
p_id = fakes.FAKE_PROJECT_ID
u_id = fakes.FAKE_USER_ID
if api_version >= '2.13':
sg1 = server_group_resp_template(id=uuidsentinel.sg1_id,
name=names[0],
policies=policies,
members=members,
metadata=metadata,
project_id=p_id,
user_id=u_id)
sg2 = server_group_resp_template(id=uuidsentinel.sg2_id,
name=names[1],
policies=policies,
members=members,
metadata=metadata,
project_id=p_id,
user_id=u_id)
else:
sg1 = server_group_resp_template(id=uuidsentinel.sg1_id,
name=names[0],
policies=policies,
members=members,
metadata=metadata)
sg2 = server_group_resp_template(id=uuidsentinel.sg2_id,
name=names[1],
policies=policies,
members=members,
metadata=metadata)
groups = [sg1, sg2]
expected = {'server_groups': groups}
def return_server_groups():
return objects.InstanceGroupList(
objects=[objects.InstanceGroup(
**server_group_db(sg)) for sg in groups])
return_get_by_project = return_server_groups()
mock_get_by_project.return_value = return_get_by_project
path = '/os-server-groups'
req = fakes.HTTPRequest.blank(path, version=api_version)
res_dict = self.controller.index(req)
self.assertEqual(expected, res_dict)
def test_display_members(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
res_dict = self.controller.show(self.req, ig_uuid)
result_members = res_dict['server_group']['members']
self.assertEqual(3, len(result_members))
for member in members:
self.assertIn(member, result_members)
def test_display_members_with_nonexistent_group(self):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.req, uuidsentinel.group)
def test_display_active_members_only(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
# delete an instance
im = objects.InstanceMapping.get_by_instance_uuid(ctx,
instances[1].uuid)
with context.target_cell(ctx, im.cell_mapping) as cctxt:
instances[1]._context = cctxt
instances[1].destroy()
# check that the instance does not exist
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid,
ctx, instances[1].uuid)
res_dict = self.controller.show(self.req, ig_uuid)
result_members = res_dict['server_group']['members']
# check that only the active instance is displayed
self.assertEqual(2, len(result_members))
self.assertIn(instances[0].uuid, result_members)
def test_display_members_rbac_default(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
ig_uuid = self._create_groups_and_instances(ctx)[0]
# test as admin
self.controller.show(self.admin_req, ig_uuid)
# test as non-admin, same project
self.controller.show(self.req, ig_uuid)
# test as non-admin, different project
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.foo_req, ig_uuid)
def test_display_members_rbac_admin_only(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
ig_uuid = self._create_groups_and_instances(ctx)[0]
# override policy to restrict to admin
rule_name = sg_policies.POLICY_ROOT % 'show'
rules = {rule_name: 'is_admin:True'}
self.policy.set_rules(rules, overwrite=False)
# check for success as admin
self.controller.show(self.admin_req, ig_uuid)
# check for failure as non-admin
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show, self.req, ig_uuid)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_create_server_group_with_non_alphanumeric_in_name(self):
# The fix for bug #1434335 expanded the allowable character set
# for server group names to include non-alphanumeric characters
# if they are printable.
sgroup = server_group_template(name='good* $%name',
policies=['affinity'])
res_dict = self.controller.create(self.req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'good* $%name')
def test_create_server_group_with_illegal_name(self):
# blank name
sgroup = server_group_template(name='', policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with length 256
sgroup = server_group_template(name='1234567890' * 26,
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# non-string name
sgroup = server_group_template(name=12, policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with leading spaces
sgroup = server_group_template(name=' leading spaces',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with trailing spaces
sgroup = server_group_template(name='trailing space ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with all spaces
sgroup = server_group_template(name=' ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with unprintable character
sgroup = server_group_template(name='bad\x00name',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with out of range char U0001F4A9
sgroup = server_group_template(name=u"\U0001F4A9",
policies=['affinity'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
# blank policy
sgroup = server_group_template(name='fake-name', policies='')
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# policy as integer
sgroup = server_group_template(name='fake-name', policies=7)
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# policy as string
sgroup = server_group_template(name='fake-name', policies='invalid')
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# policy as None
sgroup = server_group_template(name='fake-name', policies=None)
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_conflicting_policies(self):
sgroup = server_group_template()
policies = ['anti-affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_with_duplicate_policies(self):
sgroup = server_group_template()
policies = ['affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_not_supported(self):
sgroup = server_group_template()
policies = ['storage-affinity', 'anti-affinity', 'rack-affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_with_no_body(self):
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=None)
def test_create_server_group_with_no_server_group(self):
body = {'no-instanceGroup': None}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_list_server_group_by_tenant(self):
self._test_list_server_group_by_tenant(
api_version=self.wsgi_api_version)
def test_list_server_group_all_v20(self):
self._test_list_server_group_all(api_version='2.0')
def test_list_server_group_all(self):
self._test_list_server_group_all(
api_version=self.wsgi_api_version)
def test_list_server_group_offset_and_limit(self):
self._test_list_server_group_offset_and_limit(
api_version=self.wsgi_api_version)
def test_list_server_groups_rbac_default(self):
# test as admin
self.controller.index(self.admin_req)
# test as non-admin
self.controller.index(self.req)
def test_list_server_group_multiple_param(self):
self._test_list_server_group(api_version=self.wsgi_api_version,
limited='&offset=2&limit=2&limit=1&offset=1',
path='/os-server-groups?all_projects=False&all_projects=True')
def test_list_server_group_additional_param(self):
self._test_list_server_group(api_version=self.wsgi_api_version,
limited='&offset=1&limit=1',
path='/os-server-groups?dummy=False&all_projects=True')
def test_list_server_group_param_as_int(self):
self._test_list_server_group(api_version=self.wsgi_api_version,
limited='&offset=1&limit=1',
path='/os-server-groups?all_projects=1')
def test_list_server_group_negative_int_as_offset(self):
self.assertRaises(exception.ValidationError,
self._test_list_server_group,
api_version=self.wsgi_api_version,
limited='&offset=-1',
path='/os-server-groups?all_projects=1')
def test_list_server_group_string_int_as_offset(self):
self.assertRaises(exception.ValidationError,
self._test_list_server_group,
api_version=self.wsgi_api_version,
limited='&offset=dummy',
path='/os-server-groups?all_projects=1')
def test_list_server_group_multiparam_string_as_offset(self):
self.assertRaises(exception.ValidationError,
self._test_list_server_group,
api_version=self.wsgi_api_version,
limited='&offset=dummy&offset=1',
path='/os-server-groups?all_projects=1')
def test_list_server_group_negative_int_as_limit(self):
self.assertRaises(exception.ValidationError,
self._test_list_server_group,
api_version=self.wsgi_api_version,
limited='&limit=-1',
path='/os-server-groups?all_projects=1')
def test_list_server_group_string_int_as_limit(self):
self.assertRaises(exception.ValidationError,
self._test_list_server_group,
api_version=self.wsgi_api_version,
limited='&limit=dummy',
path='/os-server-groups?all_projects=1')
def test_list_server_group_multiparam_string_as_limit(self):
self.assertRaises(exception.ValidationError,
self._test_list_server_group,
api_version=self.wsgi_api_version,
limited='&limit=dummy&limit=1',
path='/os-server-groups?all_projects=1')
def test_list_server_groups_rbac_admin_only(self):
# override policy to restrict to admin
rule_name = sg_policies.POLICY_ROOT % 'index'
rules = {rule_name: 'is_admin:True'}
self.policy.set_rules(rules, overwrite=False)
# check for success as admin
self.controller.index(self.admin_req)
# check for failure as non-admin
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, self.req)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch('nova.objects.InstanceGroup.destroy')
def test_delete_server_group_by_id(self, mock_destroy):
sg = server_group_template(id=uuidsentinel.sg1_id)
def return_server_group(_cls, context, group_id):
self.assertEqual(sg['id'], group_id)
return objects.InstanceGroup(**server_group_db(sg))
self.stub_out('nova.objects.InstanceGroup.get_by_uuid',
return_server_group)
resp = self.controller.delete(self.req, uuidsentinel.sg1_id)
mock_destroy.assert_called_once_with()
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.controller, sg_v21.ServerGroupController):
status_int = self.controller.delete.wsgi_code
else:
status_int = resp.status_int
self.assertEqual(204, status_int)
def test_delete_non_existing_server_group(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
self.req, 'invalid')
def test_delete_server_group_rbac_default(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
# test as admin
ig_uuid = self._create_groups_and_instances(ctx)[0]
self.controller.delete(self.admin_req, ig_uuid)
# test as non-admin
ig_uuid = self._create_groups_and_instances(ctx)[0]
self.controller.delete(self.req, ig_uuid)
def test_delete_server_group_rbac_admin_only(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
# override policy to restrict to admin
rule_name = sg_policies.POLICY_ROOT % 'delete'
rules = {rule_name: 'is_admin:True'}
self.policy.set_rules(rules, overwrite=False)
# check for success as admin
ig_uuid = self._create_groups_and_instances(ctx)[0]
self.controller.delete(self.admin_req, ig_uuid)
# check for failure as non-admin
ig_uuid = self._create_groups_and_instances(ctx)[0]
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller.delete, self.req, ig_uuid)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
class ServerGroupTestV213(ServerGroupTestV21):
wsgi_api_version = '2.13'
def _setup_controller(self):
self.controller = sg_v21.ServerGroupController()
def test_list_server_group_all(self):
self._test_list_server_group_all(api_version='2.13')
def test_list_server_group_offset_and_limit(self):
self._test_list_server_group_offset_and_limit(api_version='2.13')
def test_list_server_group_by_tenant(self):
self._test_list_server_group_by_tenant(api_version='2.13')
class ServerGroupTestV264(ServerGroupTestV213):
wsgi_api_version = '2.64'
def _setup_controller(self):
self.controller = sg_v21.ServerGroupController()
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
sgroup = server_group_template()
sgroup['rules'] = rules or {}
sgroup['policy'] = policy
res_dict = self.controller.create(req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policy'], policy)
self.assertEqual(res_dict['server_group']['rules'], rules or {})
return res_dict['server_group']['id']
def test_list_server_group_all(self):
self._test_list_server_group_all(api_version=self.wsgi_api_version)
def test_create_and_show_server_group(self):
policies = ['affinity', 'anti-affinity']
for policy in policies:
g_uuid = self._create_server_group_normal(
policy=policy)
res_dict = self._display_server_group(g_uuid)
self.assertEqual(res_dict['server_group']['policy'], policy)
self.assertEqual(res_dict['server_group']['rules'], {})
def _display_server_group(self, uuid):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
group = self.controller.show(req, uuid)
return group
@mock.patch('nova.objects.service.get_minimum_version_all_cells',
return_value=33)
def test_create_and_show_server_group_with_rules(self, mock_get_v):
policy = 'anti-affinity'
rules = {'max_server_per_host': 3}
g_uuid = self._create_server_group_normal(
policy=policy, rules=rules)
res_dict = self._display_server_group(g_uuid)
self.assertEqual(res_dict['server_group']['policy'], policy)
self.assertEqual(res_dict['server_group']['rules'], rules)
def test_create_affinity_server_group_with_invalid_policy(self):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body={'server_group': sgroup})
self.assertIn("Only anti-affinity policy supports rules",
six.text_type(result))
def test_create_anti_affinity_server_group_with_invalid_rules(self):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
        # Negative tests: the rule key is unknown, or the value is not a
        # positive integer
invalid_rules = [{'unknown_key': '3'},
{'max_server_per_host': 0},
{'max_server_per_host': 'foo'}]
for r in invalid_rules:
sgroup = server_group_template(policy='anti-affinity', rules=r)
result = self.assertRaises(
self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
self.assertIn(
"Invalid input for field/attribute", six.text_type(result)
)
@mock.patch('nova.objects.service.get_minimum_version_all_cells',
return_value=32)
def test_create_server_group_with_low_version_compute_service(self,
mock_get_v):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='anti-affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(
webob.exc.HTTPConflict,
self.controller.create, req, body={'server_group': sgroup})
self.assertIn("Creating an anti-affinity group with rule "
"max_server_per_host > 1 is not yet supported.",
six.text_type(result))
def test_create_server_group(self):
policies = ['affinity', 'anti-affinity']
for policy in policies:
self._create_server_group_normal(policy=policy)
def test_policies_since_264(self):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
# 'policies' isn't allowed in request >= 2.64
sgroup = server_group_template(policies=['anti-affinity'])
self.assertRaises(
self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
def test_create_server_group_without_policy(self):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
# 'policy' is required request key in request >= 2.64
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
# blank policy
sgroup = server_group_template(policy='')
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
# policy as integer
sgroup = server_group_template(policy=7)
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
# policy as string
sgroup = server_group_template(policy='invalid')
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
# policy as None
sgroup = server_group_template(policy=None)
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
def test_additional_params(self):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
sgroup = server_group_template(unknown='unknown')
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
class ServerGroupTestV275(ServerGroupTestV264):
wsgi_api_version = '2.75'
def test_list_server_group_additional_param_old_version(self):
self._test_list_server_group(api_version='2.74',
limited='&offset=1&limit=1',
path='/os-server-groups?dummy=False&all_projects=True')
def test_list_server_group_additional_param(self):
req = fakes.HTTPRequest.blank('/os-server-groups?dummy=False',
version=self.wsgi_api_version)
self.assertRaises(self.validation_error, self.controller.index,
req)
| apache-2.0 | 3,167,598,080,033,200,600 | 42.641379 | 79 | 0.570401 | false |
solus-project/evolve-sc | xng/application.py | 2 | 3076 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# This file is part of solus-sc
#
# Copyright © 2013-2018 Ikey Doherty <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
from .window import ScMainWindow
from gi.repository import Gio, Gtk, Gdk, GLib, Notify
from . import join_resource_path
import random
import time
SC_APP_ID = "com.solus_project.SoftwareCentre"
class ScApplication(Gtk.Application):
app_window = None
is_service_mode = False
updates_view = False
def activate_main_view(self):
self.ensure_window()
if self.updates_view:
self.app_window.mode_open = "updates"
else:
self.app_window.mode_open = "home"
self.app_window.present()
def ensure_window(self):
""" Ensure we have a window """
if self.app_window is None:
self.app_window = ScMainWindow(self)
def startup(self, app):
""" Main entry """
self.init_css()
def init_css(self):
""" Set up the CSS before we throw any windows up """
try:
f = Gio.File.new_for_path(join_resource_path("styling.css"))
css = Gtk.CssProvider()
css.load_from_file(f)
screen = Gdk.Screen.get_default()
prio = Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
Gtk.StyleContext.add_provider_for_screen(screen,
css,
prio)
except Exception as e:
print("Error loading CSS: {}".format(e))
def __init__(self):
Gtk.Application.__init__(
self,
application_id=SC_APP_ID,
flags=Gio.ApplicationFlags.HANDLES_COMMAND_LINE)
self.connect("activate", self.on_activate)
self.connect("startup", self.startup)
self.connect("command-line", self.handle_command_line)
self.connect("handle-local-options", self.handle_local_options)
option = GLib.OptionEntry()
option.long_name = "update-view"
option.short_name = 0
option.flags = 0
option.arg = GLib.OptionArg.NONE
option.arg_data = None
description = _("Open up the updates view of the application")
option.description = description
self.add_main_option_entries([option])
# Need notifications
Notify.init("Solus Software Centre")
# Make sure random is moar betterer
random.seed(time.time())
def on_activate(self, app):
""" Activate the primary view """
self.activate_main_view()
def handle_command_line(self, app, cmdline):
self.activate()
return 0
def handle_local_options(self, app, cmdline):
if cmdline.contains("update-view"):
self.updates_view = True
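        # A negative return value tells GApplication to continue with its
        # normal command-line processing.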
return -1
| gpl-2.0 | -1,940,108,545,832,785,200 | 30.060606 | 72 | 0.595772 | false |
multidadosti-erp/multidadosti-addons | general_payments/models/account_payment.py | 1 | 8028 | from datetime import datetime
from odoo import api, fields, models
from odoo.tools.translate import _
from odoo.exceptions import UserError, ValidationError
class AccountPayment(models.Model):
_inherit = 'account.payment'
payment_amount_original = fields.Monetary(string='Original Value',
readonly=True)
launch_journal_id = fields.Many2one(comodel_name='account.journal',
string='Launch Journal')
general_account_id = fields.Many2one(comodel_name='account.account',
string='Account')
analytic_account_id = fields.Many2one(string='Analytic Account',
comodel_name='account.analytic.account')
analytic_tag_ids = fields.Many2many(string='Analytic Tags',
comodel_name='account.analytic.tag')
description = fields.Text(string='Description')
def _get_launch_move_values(self):
"""Generates a proper dict containing move values to create a launch
move record through 'account.payment' record creation.
Returns:
dict -- Dict containing values to create an 'account.move' record
"""
is_payment = True if self.payment_type == 'inbound' else False
date_now = datetime.strftime(datetime.now(), '%Y-%m-%d')
account_move_lines_base_dict = {
'partner_id': self.partner_id.id,
'date_maturity': date_now,
}
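        # Build a balanced debit/credit pair: one line hits the partner's
        # liquidity (receivable/payable) account, the other the selected
        # general account.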
ml_debit = {
**account_move_lines_base_dict,
'debit': self.amount,
**self._get_launch_aml_vals(is_payment, is_debit_line=True),
}
ml_credit = {
**account_move_lines_base_dict,
'credit': self.amount,
**self._get_launch_aml_vals(is_payment, is_debit_line=False),
}
# Create account.move dict
move_values = {
'journal_id': self.launch_journal_id.id,
'account_id': self._get_liquidity_account(is_payment),
'date': date_now,
'line_ids': [(0, 0, ml_credit), (0, 0, ml_debit)],
}
return move_values
def _get_liquidity_launch_aml_vals(self, is_payment):
"""Generates a proper dict containing aml values to create the
liquidity move line record through 'account.payment' record creation.
Arguments:
is_payment {bool} -- Verifies if the record is launching an expense
or a revenue
Returns:
dict -- AML Liquidity values
"""
return {
'name': self._get_liquidity_launch_aml_name(is_payment),
'account_id': self._get_liquidity_account(is_payment),
'analytic_account_id': self.analytic_account_id.id,
'analytic_tag_ids': [(6, 0, self.analytic_tag_ids.ids)],
}
def _get_liquidity_launch_aml_name(self, is_payment):
"""Generates a proper name to liquidity move line record which will be
created through 'account.payment' record creation.
Arguments:
is_payment {bool} -- Verifies if the record is launching an expense
or a revenue
Returns:
str -- AML Liquidity name
"""
if is_payment:
payment_part_name = _('Revenue')
else:
payment_part_name = _('Expense')
partner_part_name = (_('Customer') if self.partner_type == 'customer'
else _('Supplier'))
return '%s - %s' % (partner_part_name, payment_part_name)
def _get_counterpart_launch_aml_vals(self):
"""Generates a proper dict containing aml values to create the
counterpart move line record through 'account.payment' record creation.
Returns:
dict -- AML Liquidity values
"""
return {
'account_id': self.general_account_id.id,
}
def _get_liquidity_account(self, is_payment):
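        # is_payment selects the partner's receivable account,
        # otherwise the payable one.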
if is_payment:
return self.partner_id.property_account_receivable_id.id
else:
return self.partner_id.property_account_payable_id.id
def _get_launch_aml_vals(self, is_payment, is_debit_line):
# Depending on 'is_payment' value, will return dict of payment move
# values or receivement move values to balance the payment record
if (is_debit_line and is_payment) or (not is_debit_line and not is_payment):
return self._get_liquidity_launch_aml_vals(is_payment)
else:
return self._get_counterpart_launch_aml_vals()
@api.multi
def post(self):
context = dict(self._context or {})
        # Only relevant for a receipt ('outbound') or a payment ('inbound')
ctx_move = context.get('financial_move', False)
active_model = context.get('active_model', False)
is_financial_move = (
True if ctx_move and active_model != 'account.move' else False)
for rec in self:
if rec.payment_type != 'transfer' and is_financial_move:
# Creates the 'launch' move record to link with payment move
# generated through 'account.payment' record creation
move_values = rec._get_launch_move_values()
move = self.env['account.move'].create(move_values)
move.post()
rec.write({
'move_id': move.id,
'payment_amount_original': move.amount,
})
else:
rec.payment_amount_original = rec.move_id.amount_residual
super(AccountPayment, self).post()
@api.multi
def cancel(self):
for rec in self:
            # Raise an error if the journal's "Allow Cancelling Entries"
            # option is disabled
if not rec.move_id.journal_id.update_posted:
raise UserError(_('You cannot modify a posted entry of this journal.\nFirst you should set the journal to allow cancelling entries.'))
# Deletes reconcile records(account.partial.reconcile).
for line in rec.move_line_ids:
line.matched_credit_ids.unlink()
line.matched_debit_ids.unlink()
if not rec.invoice_ids:
liquidity_move = self.env['account.move.line'].search(
[('payment_id', '=', rec.id),
('move_id', '!=', rec.move_id.id)], limit=1).move_id
# Deletes the proper liquidity move record.
liquidity_move.button_cancel()
liquidity_move.unlink()
# Deletes the proper launch move record.
rec.move_id.button_cancel()
rec.move_id.unlink()
# Turns the payment state to cancel
super(AccountPayment, rec).cancel()
rec.move_name = False
@api.onchange('payment_type')
def _onchange_payment_type(self):
res = super(AccountPayment, self)._onchange_payment_type()
if not self.invoice_ids and not self.payment_type == 'transfer':
# Set account_account prefix
if self.payment_type == 'inbound':
acc_prefix = 3
journal_type = 'company_revenue'
elif self.payment_type == 'outbound':
acc_prefix = 4
journal_type = 'company_expense'
res['domain'] = {
**res['domain'],
'general_account_id': [('code_first_digit', '=', acc_prefix)],
'launch_journal_id': [('type', '=', journal_type)],
}
self.launch_journal_id = False
self.general_account_id = False
return res
@api.constrains('destination_journal_id', 'journal_id')
def _check_destination_journal_id(self):
if self.destination_journal_id == self.journal_id:
raise ValidationError(_(
'You can not make a transfer to the same journal'))
| agpl-3.0 | 1,683,337,759,244,076,000 | 37.763285 | 150 | 0.56817 | false |
won0089/oppia | scripts/install_third_party.py | 1 | 9591 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Installation script for Oppia third-party libraries."""
import contextlib
import itertools
import json
import os
import shutil
import StringIO
import tarfile
import urllib
import urllib2
import zipfile
import common
TOOLS_DIR = os.path.join('..', 'oppia_tools')
THIRD_PARTY_DIR = os.path.join('.', 'third_party')
THIRD_PARTY_STATIC_DIR = os.path.join(THIRD_PARTY_DIR, 'static')
MANIFEST_FILE_PATH = os.path.join(os.getcwd(), 'manifest.json')
# Place to download zip files for temporary storage.
TMP_UNZIP_PATH = os.path.join('.', 'tmp_unzip.zip')
# Check that the current directory is correct.
common.require_cwd_to_be_oppia(allow_deploy_dir=True)
TARGET_DOWNLOAD_DIRS = {
'frontend': THIRD_PARTY_STATIC_DIR,
'backend': THIRD_PARTY_DIR,
'oppiaTools': TOOLS_DIR
}
def download_files(source_url_root, target_dir, source_filenames):
"""Downloads a group of files and saves them to a given directory.
Each file is downloaded only if it does not already exist.
Args:
source_url_root: the URL to prepend to all the filenames.
target_dir: the directory to save the files to.
source_filenames: a list of filenames. Each filename is appended to the
end of the source_url_root in order to give the URL from which to
download the file. The downloaded file is then placed in target_dir,
and retains the same filename.
"""
assert isinstance(source_filenames, list)
common.ensure_directory_exists(target_dir)
for filename in source_filenames:
if not os.path.exists(os.path.join(target_dir, filename)):
print 'Downloading file %s to %s' % (filename, target_dir)
urllib.urlretrieve(
'%s/%s' % (source_url_root, filename),
os.path.join(target_dir, filename))
def download_and_unzip_files(
source_url, target_parent_dir, zip_root_name, target_root_name):
"""Downloads a zip file, unzips it, and saves the result in a given dir.
The download occurs only if the target directory that the zip file unzips
to does not exist.
NB: This function assumes that the root level of the zip file has exactly
one folder.
Args:
source_url: the URL from which to download the zip file.
target_parent_dir: the directory to save the contents of the zip file to.
zip_root_name: the name of the top-level folder in the zip directory.
target_root_name: the name that the top-level folder should be renamed to
in the local directory.
"""
if not os.path.exists(os.path.join(target_parent_dir, target_root_name)):
print 'Downloading and unzipping file %s to %s' % (
zip_root_name, target_parent_dir)
common.ensure_directory_exists(target_parent_dir)
urllib.urlretrieve(source_url, TMP_UNZIP_PATH)
try:
with zipfile.ZipFile(TMP_UNZIP_PATH, 'r') as z:
z.extractall(target_parent_dir)
os.remove(TMP_UNZIP_PATH)
except:
if os.path.exists(TMP_UNZIP_PATH):
os.remove(TMP_UNZIP_PATH)
# Some downloads (like jqueryui-themes) may require a user-agent.
req = urllib2.Request(source_url)
req.add_header('User-agent', 'python')
# This is needed to get a seekable filestream that can be used
# by zipfile.ZipFile.
file_stream = StringIO.StringIO(urllib2.urlopen(req).read())
with zipfile.ZipFile(file_stream, 'r') as z:
z.extractall(target_parent_dir)
# Rename the target directory.
os.rename(
os.path.join(target_parent_dir, zip_root_name),
os.path.join(target_parent_dir, target_root_name))
def download_and_untar_files(
source_url, target_parent_dir, tar_root_name, target_root_name):
"""Downloads a tar file, untars it, and saves the result in a given dir.
The download occurs only if the target directory that the tar file untars
to does not exist.
NB: This function assumes that the root level of the tar file has exactly
one folder.
Args:
source_url: the URL from which to download the tar file.
target_parent_dir: the directory to save the contents of the tar file to.
tar_root_name: the name of the top-level folder in the tar directory.
target_root_name: the name that the top-level folder should be renamed to
in the local directory.
"""
if not os.path.exists(os.path.join(target_parent_dir, target_root_name)):
print 'Downloading and untarring file %s to %s' % (
tar_root_name, target_parent_dir)
common.ensure_directory_exists(target_parent_dir)
urllib.urlretrieve(source_url, TMP_UNZIP_PATH)
with contextlib.closing(tarfile.open(TMP_UNZIP_PATH, 'r:gz')) as t:
t.extractall(target_parent_dir)
os.remove(TMP_UNZIP_PATH)
# Rename the target directory.
os.rename(
os.path.join(target_parent_dir, tar_root_name),
os.path.join(target_parent_dir, target_root_name))
def get_file_contents(filepath, mode='r'):
"""Gets the contents of a file, given a relative filepath from oppia/."""
with open(filepath, mode) as f:
return f.read().decode('utf-8')
def return_json(source_url):
"""Return json object when provided url
Args:
source_url: the URL of the json file.
Return:
a parsed json objects
"""
response = get_file_contents(source_url)
return json.loads(response)
def download_manifest_files(source_url):
"""This download all files to the required folders
Args:
source_url: the URL fof the json file.
"""
manifest_data = return_json(source_url)
dependencies = manifest_data['dependencies']
for data in dependencies:
dependency = dependencies[data]
for dependency_id in dependency:
dependency_contents = dependency[dependency_id]
if 'srcUrl' in dependency_contents:
DEPENDENCY_REV = dependency_contents['version']
DEPENDENCY_URL = dependency_contents['srcUrl']
DEPENDENCY_FILES = dependency_contents['files']
TARGET_DIRNAME = (
dependency_contents['targetDirPrefix'] + DEPENDENCY_REV)
DEPENDENCY_DST = os.path.join(
TARGET_DOWNLOAD_DIRS[data], TARGET_DIRNAME)
download_files(DEPENDENCY_URL, DEPENDENCY_DST, DEPENDENCY_FILES)
elif 'zipUrl' in dependency_contents:
DEPENDENCY_REV = dependency_contents['version']
DEPENDENCY_URL = dependency_contents['zipUrl']
if 'rootDir' in dependency_contents:
DEPENDENCY_ZIP_ROOT_NAME = dependency_contents['rootDir']
else:
DEPENDENCY_ZIP_ROOT_NAME = (
dependency_contents['rootDirPrefix'] + DEPENDENCY_REV)
if 'targetDir' in dependency_contents:
DEPENDENCY_TARGET_ROOT_NAME = (
dependency_contents['targetDir'])
else:
DEPENDENCY_TARGET_ROOT_NAME = (
dependency_contents['targetDirPrefix'] + DEPENDENCY_REV)
download_and_unzip_files(
DEPENDENCY_URL, TARGET_DOWNLOAD_DIRS[data],
DEPENDENCY_ZIP_ROOT_NAME, DEPENDENCY_TARGET_ROOT_NAME)
elif 'tarUrl' in dependency_contents:
DEPENDENCY_REV = dependency_contents['version']
DEPENDENCY_URL = dependency_contents['tarUrl']
DEPENDENCY_TAR_ROOT_NAME = (
dependency_contents['tarRootDirPrefix'] + DEPENDENCY_REV)
DEPENDENCY_TARGET_ROOT_NAME = (
dependency_contents['targetDirPrefix'] + DEPENDENCY_REV)
download_and_untar_files(
DEPENDENCY_URL, TARGET_DOWNLOAD_DIRS[data],
DEPENDENCY_TAR_ROOT_NAME, DEPENDENCY_TARGET_ROOT_NAME)
download_manifest_files(MANIFEST_FILE_PATH)
MATHJAX_REV = '2.4-latest'
MATHJAX_ROOT_NAME = 'MathJax-%s' % MATHJAX_REV
MATHJAX_ZIP_URL = (
'https://github.com/mathjax/MathJax/archive/v%s.zip' % MATHJAX_REV)
MATHJAX_ZIP_ROOT_NAME = MATHJAX_ROOT_NAME
MATHJAX_TARGET_ROOT_NAME = MATHJAX_ROOT_NAME
# MathJax is too big. Remove many unneeded files by following these
# instructions:
# https://github.com/mathjax/MathJax/wiki/Shrinking-MathJax-for-%22local%22-installation
MATHJAX_DIR_PREFIX = os.path.join(
THIRD_PARTY_STATIC_DIR, MATHJAX_TARGET_ROOT_NAME)
MATHJAX_SUBDIRS_TO_REMOVE = [
'unpacked', os.path.join('fonts', 'HTML-CSS', 'TeX', 'png')]
for subdir in MATHJAX_SUBDIRS_TO_REMOVE:
full_dir = os.path.join(MATHJAX_DIR_PREFIX, subdir)
if os.path.isdir(full_dir):
print 'Removing unnecessary MathJax directory \'%s\'' % subdir
shutil.rmtree(full_dir)
| apache-2.0 | -2,492,920,486,599,524,400 | 38.9625 | 90 | 0.648942 | false |
atodorov/dnf | dnf/cli/cli.py | 1 | 44597 | # Copyright 2005 Duke University
# Copyright (C) 2012-2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Written by Seth Vidal
"""
Command line interface yum class and related.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from . import output
from dnf.cli import CliError
from dnf.i18n import ucd, _
import collections
import datetime
import dnf
import dnf.cli.commands
import dnf.cli.commands.autoremove
import dnf.cli.commands.clean
import dnf.cli.commands.distrosync
import dnf.cli.commands.downgrade
import dnf.cli.commands.remove
import dnf.cli.commands.group
import dnf.cli.commands.install
import dnf.cli.commands.makecache
import dnf.cli.commands.mark
import dnf.cli.commands.reinstall
import dnf.cli.commands.repolist
import dnf.cli.commands.search
import dnf.cli.commands.updateinfo
import dnf.cli.commands.upgrade
import dnf.cli.commands.upgradeto
import dnf.cli.demand
import dnf.cli.option_parser
import dnf.conf
import dnf.conf.parser
import dnf.conf.substitutions
import dnf.const
import dnf.exceptions
import dnf.cli.format
import dnf.logging
import dnf.plugin
import dnf.persistor
import dnf.rpm
import dnf.sack
import dnf.util
import dnf.yum.config
import dnf.yum.misc
import hawkey
import logging
import operator
import os
import re
import sys
import time
logger = logging.getLogger('dnf')
def _add_pkg_simple_list_lens(data, pkg, indent=''):
""" Get the length of each pkg's column. Add that to data.
This "knows" about simpleList and printVer. """
na = len(pkg.name) + 1 + len(pkg.arch) + len(indent)
ver = len(pkg.evr)
rid = len(pkg.from_repo)
for (d, v) in (('na', na), ('ver', ver), ('rid', rid)):
data[d].setdefault(v, 0)
data[d][v] += 1
def _list_cmd_calc_columns(output, ypl):
""" Work out the dynamic size of the columns to pass to fmtColumns. """
data = {'na' : {}, 'ver' : {}, 'rid' : {}}
for lst in (ypl.installed, ypl.available, ypl.extras, ypl.autoremove,
ypl.updates, ypl.recent):
for pkg in lst:
_add_pkg_simple_list_lens(data, pkg)
if len(ypl.obsoletes) > 0:
for (npkg, opkg) in ypl.obsoletesTuples:
_add_pkg_simple_list_lens(data, npkg)
_add_pkg_simple_list_lens(data, opkg, indent=" " * 4)
data = [data['na'], data['ver'], data['rid']]
columns = output.calcColumns(data, remainder_column=1)
return (-columns[0], -columns[1], -columns[2])
def cachedir_fit(conf):
cli_cache = dnf.conf.CliCache(conf.cachedir)
return cli_cache.cachedir, cli_cache.system_cachedir
def print_versions(pkgs, base, output):
def sm_ui_time(x):
return time.strftime("%Y-%m-%d %H:%M", time.gmtime(x))
def sm_ui_date(x): # For changelogs, there is no time
return time.strftime("%Y-%m-%d", time.gmtime(x))
rpmdb_sack = dnf.sack.rpmdb_sack(base)
done = False
for pkg in rpmdb_sack.query().installed().filter(name=pkgs):
if done:
print("")
done = True
if pkg.epoch == '0':
ver = '%s-%s.%s' % (pkg.version, pkg.release, pkg.arch)
else:
ver = '%s:%s-%s.%s' % (pkg.epoch,
pkg.version, pkg.release, pkg.arch)
name = "%s%s%s" % (output.term.MODE['bold'], pkg.name,
output.term.MODE['normal'])
print(_(" Installed: %s-%s at %s") %(name, ver,
sm_ui_time(pkg.installtime)))
print(_(" Built : %s at %s") % (pkg.packager if pkg.packager else "",
sm_ui_time(pkg.buildtime)))
# :hawkey, no changelist information yet
# print(_(" Committed: %s at %s") % (pkg.committer,
# sm_ui_date(pkg.committime)))
class BaseCli(dnf.Base):
"""This is the base class for yum cli."""
def __init__(self, conf=None):
self.cmd_conf = CmdConf()
conf = conf or dnf.conf.Conf()
super(BaseCli, self).__init__(conf=conf)
self.output = output.Output(self, self.conf)
def _groups_diff(self):
if not self.group_persistor:
return None
return self.group_persistor.diff()
def do_transaction(self, display=()):
"""Take care of package downloading, checking, user
confirmation and actually running the transaction.
:param display: `rpm.callback.TransactionProgress` object(s)
:return: a numeric return code, and optionally a list of
errors. A negative return code indicates that errors
occurred in the pre-transaction checks
"""
grp_diff = self._groups_diff()
grp_str = self.output.list_group_transaction(self.comps, self.group_persistor, grp_diff)
if grp_str:
logger.info(grp_str)
trans = self.transaction
pkg_str = self.output.list_transaction(trans)
if pkg_str:
logger.info(pkg_str)
if trans:
# Check which packages have to be downloaded
downloadpkgs = []
rmpkgs = []
stuff_to_download = False
install_only = True
for tsi in trans:
installed = tsi.installed
if installed is not None:
stuff_to_download = True
downloadpkgs.append(installed)
erased = tsi.erased
if erased is not None:
install_only = False
rmpkgs.append(erased)
# Close the connection to the rpmdb so that rpm doesn't hold the
# SIGINT handler during the downloads.
del self.ts
# report the total download size to the user
if not stuff_to_download:
self.output.reportRemoveSize(rmpkgs)
else:
self.output.reportDownloadSize(downloadpkgs, install_only)
if trans or (grp_diff and not grp_diff.empty()):
# confirm with user
if self._promptWanted():
if self.conf.assumeno or not self.output.userconfirm():
raise CliError(_("Operation aborted."))
else:
logger.info(_('Nothing to do.'))
return
if trans:
if downloadpkgs:
logger.info(_('Downloading Packages:'))
try:
total_cb = self.output.download_callback_total_cb
self.download_packages(downloadpkgs, self.output.progress,
total_cb)
except dnf.exceptions.DownloadError as e:
specific = dnf.cli.format.indent_block(ucd(e))
errstring = _('Error downloading packages:\n%s') % specific
# setting the new line to prevent next chars being eaten up by carriage returns
print()
raise dnf.exceptions.Error(errstring)
# Check GPG signatures
self.gpgsigcheck(downloadpkgs)
if self.cmd_conf.downloadonly:
return
if not isinstance(display, collections.Sequence):
display = [display]
display = [output.CliTransactionDisplay()] + list(display)
super(BaseCli, self).do_transaction(display)
if trans:
msg = self.output.post_transaction_output(trans)
logger.info(msg)
def gpgsigcheck(self, pkgs):
"""Perform GPG signature verification on the given packages,
installing keys if possible.
:param pkgs: a list of package objects to verify the GPG
signatures of
:return: non-zero if execution should stop due to an error
:raises: Will raise :class:`Error` if there's a problem
"""
for po in pkgs:
result, errmsg = self.sigCheckPkg(po)
if result == 0:
# Verified ok, or verify not req'd
continue
elif result == 1:
ay = self.conf.assumeyes and not self.conf.assumeno
if not sys.stdin.isatty() and not ay:
raise dnf.exceptions.Error(_('Refusing to automatically import keys when running ' \
'unattended.\nUse "-y" to override.'))
# the callback here expects to be able to take options which
# userconfirm really doesn't... so fake it
fn = lambda x, y, z: self.output.userconfirm()
self.getKeyForPackage(po, fn)
else:
# Fatal error
raise dnf.exceptions.Error(errmsg)
return 0
def check_updates(self, patterns=(), reponame=None, print_=True):
"""Check updates matching given *patterns* in selected repository."""
ypl = self.returnPkgLists('upgrades', patterns, reponame=reponame)
if self.conf.obsoletes or self.conf.verbose:
typl = self.returnPkgLists('obsoletes', patterns, reponame=reponame)
ypl.obsoletes = typl.obsoletes
ypl.obsoletesTuples = typl.obsoletesTuples
if print_:
columns = _list_cmd_calc_columns(self.output, ypl)
if len(ypl.updates) > 0:
local_pkgs = {}
highlight = self.output.term.MODE['bold']
if highlight:
# Do the local/remote split we get in "yum updates"
for po in sorted(ypl.updates):
local = po.localPkg()
if os.path.exists(local) and po.verifyLocalPkg():
local_pkgs[(po.name, po.arch)] = po
cul = self.conf.color_update_local
cur = self.conf.color_update_remote
self.output.listPkgs(ypl.updates, '', outputType='list',
highlight_na=local_pkgs, columns=columns,
highlight_modes={'=' : cul, 'not in' : cur})
if len(ypl.obsoletes) > 0:
print(_('Obsoleting Packages'))
# The tuple is (newPkg, oldPkg) ... so sort by new
for obtup in sorted(ypl.obsoletesTuples,
key=operator.itemgetter(0)):
self.output.updatesObsoletesList(obtup, 'obsoletes',
columns=columns)
return ypl.updates or ypl.obsoletes
def upgrade_userlist_to(self, userlist, reponame=None):
oldcount = self._goal.req_length()
for l in userlist:
self.upgrade_to(l, reponame)
cnt = self._goal.req_length() - oldcount
if cnt <= 0:
raise dnf.exceptions.Error(_('No packages marked for upgrade.'))
def distro_sync_userlist(self, userlist):
""" Upgrade or downgrade packages to match the latest versions available
in the enabled repositories.
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
oldcount = self._goal.req_length()
if len(userlist) == 0:
self.distro_sync()
else:
for pkg_spec in userlist:
self.distro_sync(pkg_spec)
cnt = self._goal.req_length() - oldcount
if cnt <= 0 and not self._goal.req_has_distupgrade_all():
msg = _('No packages marked for distribution synchronization.')
raise dnf.exceptions.Error(msg)
def downgradePkgs(self, userlist):
"""Attempt to take the user specified list of packages or
wildcards and downgrade them. If a complete version number if
specified, attempt to downgrade them to the specified version
:param userlist: a list of names or wildcards specifying
packages to downgrade
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
oldcount = self._goal.req_length()
for arg in userlist:
wildcard = True if dnf.util.is_glob_pattern(arg) else False
if arg.endswith('.rpm'):
pkg = self.add_remote_rpm(arg)
self.package_downgrade(pkg)
continue # it was something on disk and it ended in rpm
# no matter what we don't go looking at repos
try:
self.downgrade_to(arg)
except dnf.exceptions.PackageNotFoundError as err:
msg = _('No package %s%s%s available.')
logger.info(msg, self.output.term.MODE['bold'], arg,
self.output.term.MODE['normal'])
except dnf.exceptions.PackagesNotInstalledError as err:
if not wildcard:
# glob pattern should not match not installed packages -> ignore error
for pkg in err.packages:
logger.info(_('No match for available package: %s'), pkg)
except dnf.exceptions.MarkingError:
assert False
cnt = self._goal.req_length() - oldcount
if cnt <= 0:
raise dnf.exceptions.Error(_('Nothing to do.'))
def output_packages(self, basecmd, pkgnarrow='all', patterns=(), reponame=None):
"""Output selection *pkgnarrow* of packages matching *patterns* and *repoid*."""
try:
highlight = self.output.term.MODE['bold']
ypl = self.returnPkgLists(
pkgnarrow, patterns, installed_available=highlight, reponame=reponame)
except dnf.exceptions.Error as e:
return 1, [str(e)]
else:
update_pkgs = {}
inst_pkgs = {}
local_pkgs = {}
columns = None
if basecmd == 'list':
# Dynamically size the columns
columns = _list_cmd_calc_columns(self.output, ypl)
if highlight and ypl.installed:
# If we have installed and available lists, then do the
# highlighting for the installed packages so you can see what's
# available to update, an extra, or newer than what we have.
for pkg in (ypl.hidden_available +
ypl.reinstall_available +
ypl.old_available):
key = (pkg.name, pkg.arch)
if key not in update_pkgs or pkg > update_pkgs[key]:
update_pkgs[key] = pkg
if highlight and ypl.available:
# If we have installed and available lists, then do the
# highlighting for the available packages so you can see what's
# available to install vs. update vs. old.
for pkg in ypl.hidden_installed:
key = (pkg.name, pkg.arch)
if key not in inst_pkgs or pkg > inst_pkgs[key]:
inst_pkgs[key] = pkg
if highlight and ypl.updates:
# Do the local/remote split we get in "yum updates"
for po in sorted(ypl.updates):
if po.reponame != hawkey.SYSTEM_REPO_NAME:
local_pkgs[(po.name, po.arch)] = po
# Output the packages:
clio = self.conf.color_list_installed_older
clin = self.conf.color_list_installed_newer
clir = self.conf.color_list_installed_reinstall
clie = self.conf.color_list_installed_extra
rip = self.output.listPkgs(ypl.installed, _('Installed Packages'), basecmd,
highlight_na=update_pkgs, columns=columns,
highlight_modes={'>' : clio, '<' : clin,
'=' : clir, 'not in' : clie})
clau = self.conf.color_list_available_upgrade
clad = self.conf.color_list_available_downgrade
clar = self.conf.color_list_available_reinstall
clai = self.conf.color_list_available_install
rap = self.output.listPkgs(ypl.available, _('Available Packages'), basecmd,
highlight_na=inst_pkgs, columns=columns,
highlight_modes={'<' : clau, '>' : clad,
'=' : clar, 'not in' : clai})
raep = self.output.listPkgs(ypl.autoremove, _('Autoremove Packages'),
basecmd, columns=columns)
rep = self.output.listPkgs(ypl.extras, _('Extra Packages'), basecmd,
columns=columns)
cul = self.conf.color_update_local
cur = self.conf.color_update_remote
rup = self.output.listPkgs(ypl.updates, _('Upgraded Packages'), basecmd,
highlight_na=local_pkgs, columns=columns,
highlight_modes={'=' : cul, 'not in' : cur})
# XXX put this into the ListCommand at some point
if len(ypl.obsoletes) > 0 and basecmd == 'list':
# if we've looked up obsolete lists and it's a list request
rop = [0, '']
print(_('Obsoleting Packages'))
for obtup in sorted(ypl.obsoletesTuples,
key=operator.itemgetter(0)):
self.output.updatesObsoletesList(obtup, 'obsoletes',
columns=columns)
else:
rop = self.output.listPkgs(ypl.obsoletes, _('Obsoleting Packages'),
basecmd, columns=columns)
rrap = self.output.listPkgs(ypl.recent, _('Recently Added Packages'),
basecmd, columns=columns)
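            # Each listPkgs() call above signals through its first return element
            # whether its section had anything to print; if every section came
            # back empty for an explicit pattern, report that nothing matched.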
if len(patterns) and \
rrap[0] and rop[0] and rup[0] and rep[0] and rap[0] and \
raep[0] and rip[0]:
raise dnf.exceptions.Error(_('No matching Packages to list'))
def returnPkgLists(self, pkgnarrow='all', patterns=None,
installed_available=False, reponame=None):
"""Return a :class:`dnf.yum.misc.GenericHolder` object containing
lists of package objects that match the given names or wildcards.
:param pkgnarrow: a string specifying which types of packages
lists to produce, such as updates, installed, available, etc.
:param patterns: a list of names or wildcards specifying
packages to list
:param installed_available: whether the available package list
is present as .hidden_available when doing all, available,
or installed
:param reponame: limit packages list to the given repository
:return: a :class:`dnf.yum.misc.GenericHolder` instance with the
following lists defined::
available = list of packageObjects
installed = list of packageObjects
upgrades = tuples of packageObjects (updating, installed)
extras = list of packageObjects
obsoletes = tuples of packageObjects (obsoleting, installed)
recent = list of packageObjects
"""
done_hidden_available = False
done_hidden_installed = False
if installed_available and pkgnarrow == 'installed':
done_hidden_available = True
pkgnarrow = 'all'
elif installed_available and pkgnarrow == 'available':
done_hidden_installed = True
pkgnarrow = 'all'
ypl = self.doPackageLists(
pkgnarrow, patterns, ignore_case=True, reponame=reponame)
if self.conf.showdupesfromrepos:
ypl.available += ypl.reinstall_available
if installed_available:
ypl.hidden_available = ypl.available
ypl.hidden_installed = ypl.installed
if done_hidden_available:
ypl.available = []
if done_hidden_installed:
ypl.installed = []
return ypl
def provides(self, args):
"""Print out a list of packages that provide the given file or
        feature. This is a cli wrapper to the provides methods in the
rpmdb and pkgsack.
:param args: the name of a file or feature to search for
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
# always in showdups mode
old_sdup = self.conf.showdupesfromrepos
self.conf.showdupesfromrepos = True
matches = []
for spec in args:
            matches.extend(super(BaseCli, self).provides(spec))
for pkg in matches:
self.output.matchcallback_verbose(pkg, [], args)
self.conf.showdupesfromrepos = old_sdup
if not matches:
raise dnf.exceptions.Error(_('No Matches found'))
def _promptWanted(self):
# shortcut for the always-off/always-on options
if self.conf.assumeyes and not self.conf.assumeno:
return False
if self.conf.alwaysprompt:
return True
# prompt if:
# package was added to fill a dependency
# package is being removed
# package wasn't explicitly given on the command line
for txmbr in self.tsInfo.getMembers():
if txmbr.isDep or \
txmbr.name not in self.extcmds:
return True
# otherwise, don't prompt
return False
@staticmethod
def transaction_id_or_offset(extcmd):
"""Convert user input to a transaction ID or an offset from the end."""
try:
            offset_str, = re.match(r'^last(-\d+)?$', extcmd).groups()
except AttributeError: # extcmd does not match the regex.
id_ = int(extcmd)
if id_ < 0:
# Negative return values are reserved for offsets.
raise ValueError('bad transaction ID given: %s' % extcmd)
return id_
else:
# Was extcmd 'last-N' or just 'last'?
offset = int(offset_str) if offset_str else 0
# Return offsets as negative numbers, where -1 means the last
# transaction as when indexing sequences.
return offset - 1
def _history_get_transactions(self, extcmds):
if not extcmds:
logger.critical(_('No transaction ID given'))
return None
tids = []
last = None
for extcmd in extcmds:
try:
id_or_offset = self.transaction_id_or_offset(extcmd)
except ValueError:
logger.critical(_('Bad transaction ID given'))
return None
if id_or_offset < 0:
if last is None:
cto = False
last = self.history.last(complete_transactions_only=cto)
if last is None:
logger.critical(_('Bad transaction ID given'))
return None
tids.append(str(last.tid + id_or_offset + 1))
else:
tids.append(str(id_or_offset))
old = self.history.old(tids)
if not old:
logger.critical(_('Not found given transaction ID'))
return None
return old
def history_get_transaction(self, extcmds):
old = self._history_get_transactions(extcmds)
if old is None:
return None
if len(old) > 1:
logger.critical(_('Found more than one transaction ID!'))
return old[0]
def history_rollback_transaction(self, extcmd):
"""Rollback given transaction."""
old = self.history_get_transaction((extcmd,))
if old is None:
return 1, ['Failed history rollback, no transaction']
last = self.history.last()
if last is None:
return 1, ['Failed history rollback, no last?']
if old.tid == last.tid:
return 0, ['Rollback to current, nothing to do']
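        # Merge every transaction made after the rollback target into a single
        # virtual transaction so the whole span can be undone in one pass below.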
mobj = None
for tid in self.history.old(list(range(old.tid + 1, last.tid + 1))):
if tid.altered_lt_rpmdb:
logger.warning(_('Transaction history is incomplete, before %u.'), tid.tid)
elif tid.altered_gt_rpmdb:
logger.warning(_('Transaction history is incomplete, after %u.'), tid.tid)
if mobj is None:
mobj = dnf.yum.history.YumMergedHistoryTransaction(tid)
else:
mobj.merge(tid)
tm = time.ctime(old.beg_timestamp)
print("Rollback to transaction %u, from %s" % (old.tid, tm))
print(self.output.fmtKeyValFill(" Undoing the following transactions: ",
", ".join((str(x) for x in mobj.tid))))
self.output.historyInfoCmdPkgsAltered(mobj) # :todo
history = dnf.history.open_history(self.history) # :todo
operations = dnf.history.NEVRAOperations()
for id_ in range(old.tid + 1, last.tid + 1):
operations += history.transaction_nevra_ops(id_)
hibeg = self.output.term.MODE['bold']
hiend = self.output.term.MODE['normal']
try:
self.history_undo_operations(operations)
except dnf.exceptions.PackagesNotInstalledError as err:
logger.info(_('No package %s%s%s installed.'),
hibeg, ucd(err.pkg_spec), hiend)
return 1, ['A transaction cannot be undone']
except dnf.exceptions.PackagesNotAvailableError as err:
logger.info(_('No package %s%s%s available.'),
hibeg, ucd(err.pkg_spec), hiend)
return 1, ['A transaction cannot be undone']
except dnf.exceptions.MarkingError:
assert False
else:
return 2, ["Rollback to transaction %u" % (old.tid,)]
def history_undo_transaction(self, extcmd):
"""Undo given transaction."""
old = self.history_get_transaction((extcmd,))
if old is None:
return 1, ['Failed history undo']
tm = time.ctime(old.beg_timestamp)
print("Undoing transaction %u, from %s" % (old.tid, tm))
self.output.historyInfoCmdPkgsAltered(old) # :todo
history = dnf.history.open_history(self.history) # :todo
hibeg = self.output.term.MODE['bold']
hiend = self.output.term.MODE['normal']
try:
self.history_undo_operations(history.transaction_nevra_ops(old.tid))
except dnf.exceptions.PackagesNotInstalledError as err:
logger.info(_('No package %s%s%s installed.'),
hibeg, ucd(err.pkg_spec), hiend)
return 1, ['An operation cannot be undone']
except dnf.exceptions.PackagesNotAvailableError as err:
logger.info(_('No package %s%s%s available.'),
hibeg, ucd(err.pkg_spec), hiend)
return 1, ['An operation cannot be undone']
except dnf.exceptions.MarkingError:
assert False
else:
return 2, ["Undoing transaction %u" % (old.tid,)]
class Cli(object):
def __init__(self, base):
self._system_cachedir = None
self.base = base
self.cli_commands = {}
self.command = None
self.demands = dnf.cli.demand.DemandSheet() #:cli
self.main_setopts = {}
self.nogpgcheck = False
self.repo_setopts = {}
self.register_command(dnf.cli.commands.autoremove.AutoremoveCommand)
self.register_command(dnf.cli.commands.clean.CleanCommand)
self.register_command(dnf.cli.commands.distrosync.DistroSyncCommand)
self.register_command(dnf.cli.commands.downgrade.DowngradeCommand)
self.register_command(dnf.cli.commands.group.GroupCommand)
self.register_command(dnf.cli.commands.install.InstallCommand)
self.register_command(dnf.cli.commands.makecache.MakeCacheCommand)
self.register_command(dnf.cli.commands.mark.MarkCommand)
self.register_command(dnf.cli.commands.reinstall.ReinstallCommand)
self.register_command(dnf.cli.commands.remove.RemoveCommand)
self.register_command(dnf.cli.commands.repolist.RepoListCommand)
self.register_command(dnf.cli.commands.search.SearchCommand)
self.register_command(dnf.cli.commands.updateinfo.UpdateInfoCommand)
self.register_command(dnf.cli.commands.upgrade.UpgradeCommand)
self.register_command(dnf.cli.commands.upgradeto.UpgradeToCommand)
self.register_command(dnf.cli.commands.InfoCommand)
self.register_command(dnf.cli.commands.ListCommand)
self.register_command(dnf.cli.commands.ProvidesCommand)
self.register_command(dnf.cli.commands.CheckUpdateCommand)
self.register_command(dnf.cli.commands.RepoPkgsCommand)
self.register_command(dnf.cli.commands.HelpCommand)
self.register_command(dnf.cli.commands.HistoryCommand)
def _configure_cachedir(self):
"""Set CLI-specific cachedir and its alternative."""
conf = self.base.conf
conf.cachedir, self._system_cachedir = cachedir_fit(conf)
logger.debug("cachedir: %s", conf.cachedir)
def _configure_repos(self, opts):
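        # Build the repo list from configuration, add any --repofrompath repos,
        # apply enable/disable requests in order, then wire up caching, expiry
        # and progress/key-import callbacks.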
self.base.read_all_repos(self.repo_setopts)
if opts.repofrompath:
for label, path in opts.repofrompath.items():
if path[0] == '/':
path = 'file://' + path
repofp = dnf.repo.Repo(label, self.base.conf.cachedir)
repofp.baseurl = path
self.base.repos.add(repofp)
logger.info(_("Added %s repo from %s") % (label, path))
# Process repo enables and disables in order
try:
for (repo, operation) in opts.repos_ed:
repolist = self.base.repos.get_matching(repo)
if not repolist:
msg = _("Unknown repo: '%s'")
raise dnf.exceptions.RepoError(msg % repo)
if operation == "enable":
repolist.enable()
else:
repolist.disable()
except dnf.exceptions.ConfigError as e:
logger.critical(e)
self.print_usage()
sys.exit(1)
if self.nogpgcheck:
for repo in self.base.repos.values():
repo.gpgcheck = False
repo.repo_gpgcheck = False
for rid in self.base.repo_persistor.get_expired_repos():
repo = self.base.repos.get(rid)
if repo:
repo.md_expire_cache()
if opts.cacheonly:
self.demands.cacheonly = True
for repo in self.base.repos.values():
repo.basecachedir = self._system_cachedir
repo.md_only_cached = True
# setup the progress bars/callbacks
(bar, self.base.ds_callback) = self.base.output.setup_progress_callbacks()
self.base.repos.all().set_progress_bar(bar)
key_import = output.CliKeyImport(self.base, self.base.output)
self.base.repos.all().set_key_import(key_import)
def _log_essentials(self):
logger.debug('DNF version: %s', dnf.const.VERSION)
logger.log(dnf.logging.DDEBUG,
'Command: %s', self.cmdstring)
logger.log(dnf.logging.DDEBUG,
'Installroot: %s', self.base.conf.installroot)
logger.log(dnf.logging.DDEBUG, 'Releasever: %s',
self.base.conf.releasever)
def _process_demands(self):
demands = self.demands
repos = self.base.repos
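        # Honor the metadata-freshness demands: expire caches when the freshest
        # metadata was requested, otherwise allow lazily reusing what is on disk.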
if not demands.cacheonly:
if demands.freshest_metadata:
for repo in repos.iter_enabled():
repo.md_expire_cache()
elif not demands.fresh_metadata:
for repo in repos.values():
repo.md_lazy = True
if demands.sack_activation:
lar = self.demands.available_repos
self.base.fill_sack(load_system_repo='auto',
load_available_repos=lar)
if lar:
repos = list(self.base.repos.iter_enabled())
if repos:
mts = max(repo.metadata.timestamp for repo in repos)
# do not bother users with fractions of seconds
age = int(min(repo.metadata.age for repo in repos))
for repo in repos:
logger.debug(_("%s: using metadata from %s."),
repo.id,
time.ctime(repo.metadata.md_timestamp))
logger.info(_("Last metadata expiration check performed "
"%s ago on %s."),
datetime.timedelta(seconds=age),
time.ctime(mts))
self.base.plugins.run_sack()
def _root_and_conffile(self, installroot, conffile):
"""After the first parse of the cmdline options, find initial values for
installroot and conffile.
:return: installroot and conffile strings
"""
# If the conf file is inside the installroot - use that.
# otherwise look for it in the normal root
if installroot and conffile:
abs_fn = os.path.join(installroot, conffile)
if os.access(abs_fn, os.R_OK):
conffile = abs_fn
elif installroot:
conffile = dnf.const.CONF_FILENAME
abs_fn = os.path.join(installroot, conffile[1:])
if os.access(abs_fn, os.R_OK):
conffile = abs_fn
if installroot is None:
installroot = '/'
if conffile is None:
conffile = dnf.const.CONF_FILENAME
return installroot, conffile
def _parse_commands(self):
"""Check that the requested CLI command exists."""
if len(self.base.cmds) < 1:
logger.critical(_('You need to give some command'))
self.print_usage()
raise CliError
basecmd = self.base.cmds[0] # our base command
command_cls = self.cli_commands.get(basecmd)
if command_cls is None:
logger.critical(_('No such command: %s. Please use %s --help'),
basecmd, sys.argv[0])
if self.base.conf.plugins:
logger.info(_("It could be a DNF plugin command, "
"try: \"dnf install 'dnf-command(%s)'\""), basecmd)
else:
logger.info(_("It could be a DNF plugin command, "
"but loading of plugins is currently disabled."))
raise CliError
self.command = command_cls(self)
(base, ext) = self.command.canonical(self.base.cmds)
self.base.basecmd, self.base.extcmds = (base, ext)
logger.log(dnf.logging.DDEBUG, 'Base command: %s', base)
logger.log(dnf.logging.DDEBUG, 'Extra commands: %s', ext)
def _parse_setopts(self, setopts):
"""Parse setopts and repo_setopts."""
repoopts = {}
mainopts = dnf.yum.misc.GenericHolder()
mainopts.items = []
bad_setopt_tm = []
bad_setopt_ne = []
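        # Each item is expected as "option=value" for the main config or
        # "repoid.option=value" for a per-repository override; malformed items
        # are collected and returned so the caller can warn about them.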
for item in setopts:
vals = item.split('=')
if len(vals) > 2:
bad_setopt_tm.append(item)
continue
if len(vals) < 2:
bad_setopt_ne.append(item)
continue
            k, v = vals
period = k.find('.')
if period != -1:
repo = k[:period]
k = k[period+1:]
if repo not in repoopts:
repoopts[repo] = dnf.yum.misc.GenericHolder()
repoopts[repo].items = []
setattr(repoopts[repo], k, v)
repoopts[repo].items.append(k)
else:
setattr(mainopts, k, v)
mainopts.items.append(k)
self.main_setopts = mainopts
self.repo_setopts = repoopts
return bad_setopt_tm, bad_setopt_ne
def _get_first_config(self, opts):
config_args = ['noplugins', 'version', "quiet", "verbose", 'conffile',
'debuglevel', 'errorlevel', 'installroot', 'releasever',
'setopt']
in_dict = opts.__dict__
return {k: in_dict[k] for k in in_dict if k in config_args}
def configure(self, args):
"""Parse command line arguments, and set up :attr:`self.base.conf` and
:attr:`self.cmds`, as well as logger objects in base instance.
:param args: a list of command line arguments
"""
self.optparser = dnf.cli.option_parser.OptionParser()
opts, cmds = self.optparser.parse_known_args(args)
# Just print out the version if that's what the user wanted
if opts.version:
print(dnf.const.VERSION)
opts.quiet = True
opts.verbose = False
# go through all the setopts and set the global ones
bad_setopt_tm, bad_setopt_ne = self._parse_setopts(opts.setopts)
if self.main_setopts:
for opt in self.main_setopts.items:
setattr(opts, opt, getattr(self.main_setopts, opt))
# get the install root to use
self.optparser._checkAbsInstallRoot(opts.installroot)
(root, opts.conffile) = self._root_and_conffile(opts.installroot,
opts.conffile)
# the conffile is solid now
assert(opts.conffile is not None)
if opts.quiet:
opts.debuglevel = 0
if opts.verbose:
opts.debuglevel = opts.errorlevel = dnf.const.VERBOSE_LEVEL
# Read up configuration options and initialize plugins
overrides = self.optparser._non_nones2dict(self._get_first_config(opts))
releasever = opts.releasever
try:
self.read_conf_file(opts.conffile, root, releasever, overrides)
# now set all the non-first-start opts from main from our setopts
if self.main_setopts:
for opt in self.main_setopts.items:
if not hasattr(self.base.conf, opt):
                        msg = "Main config did not have a %s attr. before setopt"
logger.warning(msg % opt)
setattr(self.base.conf, opt, getattr(self.main_setopts, opt))
except (dnf.exceptions.ConfigError, ValueError) as e:
logger.critical(_('Config error: %s'), e)
sys.exit(1)
except IOError as e:
e = '%s: %s' % (ucd(e.args[1]), repr(e.filename))
logger.critical(_('Config error: %s'), e)
sys.exit(1)
for item in bad_setopt_tm:
msg = "Setopt argument has multiple values: %s"
logger.warning(msg % item)
for item in bad_setopt_ne:
msg = "Setopt argument has no value: %s"
logger.warning(msg % item)
self.optparser.configure_from_options(opts, self.base.conf, self.demands,
self.base.output)
self.base.cmds = cmds
if opts.version:
opts.quiet = True
opts.verbose = False
if opts.quiet:
opts.debuglevel = 0
if opts.verbose:
opts.debuglevel = opts.errorlevel = dnf.const.VERBOSE_LEVEL
self.nogpgcheck = opts.nogpgcheck
# the configuration reading phase is now concluded, finish the init
self._configure_cachedir()
# with cachedir in place we can configure stuff depending on it:
self.base.activate_persistor()
self._configure_repos(opts)
if opts.version:
print_versions(self.base.conf.history_record_packages, self.base,
self.base.output)
sys.exit(0)
# store the main commands & summaries, before plugins are loaded
self.optparser.add_commands(self.cli_commands, 'main')
if self.base.conf.plugins:
self.base.plugins.load(self.base.conf.pluginpath, opts.disableplugins)
self.base.plugins.run_init(self.base, self)
# store the plugin commands & summaries
        self.optparser.add_commands(self.cli_commands, 'plugin')
# build the usage info and put it into the optparser.
self.optparser.usage = self.optparser.get_usage()
# show help if the user requests it
# this is done here, because we first have the full
# usage info after the plugins are loaded.
if opts.help:
self.optparser.print_help()
sys.exit(0)
# save our original args out
self.base.args = args
# save out as a nice command string
self.cmdstring = dnf.const.PROGRAM_NAME + ' '
for arg in self.base.args:
self.cmdstring += '%s ' % arg
self._log_essentials()
try:
self._parse_commands() # before we return check over the base command
# + args make sure they match/make sense
except CliError:
sys.exit(1)
self.command.configure(self.base.extcmds)
if opts.debugsolver:
self.base.conf.debug_solver = True
self.base.cmd_conf.downloadonly = opts.downloadonly
self.base.plugins.run_config()
def check(self):
"""Make sure the command line and options make sense."""
self.command.doCheck(self.base.basecmd, self.base.extcmds)
def read_conf_file(self, path=None, root="/", releasever=None,
overrides=None):
timer = dnf.logging.Timer('config')
conf = self.base.conf
conf.installroot = root
conf.read(path)
if releasever is None:
releasever = dnf.rpm.detect_releasever(root)
conf.releasever = releasever
subst = conf.substitutions
subst.update_from_etc(root)
if overrides is not None:
conf.override(overrides)
conf.logdir = dnf.yum.config.logdir_fit(conf.logdir)
for opt in ('cachedir', 'logdir', 'persistdir'):
conf.prepend_installroot(opt)
conf._var_replace(opt)
self.base.logging.setup_from_dnf_conf(conf)
timer()
return conf
def register_command(self, command_cls):
"""Register a Command. :api"""
for name in command_cls.aliases:
if name in self.cli_commands:
raise dnf.exceptions.ConfigError(_('Command "%s" already defined') % name)
self.cli_commands[name] = command_cls
def run(self):
"""Call the base command, and pass it the extended commands or
arguments.
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
self._process_demands()
return self.command.run(self.base.extcmds)
def print_usage(self):
return self.optparser.print_usage()
class CmdConf(object):
"""Class for storing nonpersistent configuration"""
downloadonly = False
| gpl-2.0 | 388,673,573,406,924,300 | 39.432457 | 104 | 0.568289 | false |
magfest/ubersystem | uber/api.py | 1 | 38302 | import re
import uuid
from datetime import datetime
from functools import wraps
import cherrypy
import pytz
import six
from cherrypy import HTTPError
from dateutil import parser as dateparser
from pockets import unwrap
from time import mktime
from residue import UTCDateTime
from sqlalchemy import and_, func, or_
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
from sqlalchemy.types import Boolean, Date
from uber.barcode import get_badge_num_from_barcode
from uber.config import c
from uber.decorators import department_id_adapter
from uber.errors import CSRFException
from uber.models import AdminAccount, ApiToken, Attendee, Department, DeptMembership, DeptMembershipRequest, \
Event, IndieStudio, Job, Session, Shift, GuestGroup, Room, HotelRequests, RoomAssignment
from uber.server import register_jsonrpc
from uber.utils import check, check_csrf, normalize_newlines
__version__ = '1.0'
def docstring_format(*args, **kwargs):
def _decorator(obj):
obj.__doc__ = obj.__doc__.format(*args, **kwargs)
return obj
return _decorator
def _format_opts(opts):
html = ['<table class="opts"><tbody>']
for value, label in opts:
html.append(
'<tr class="opt">'
'<td class="opt-value">{}</td>'
'<td class="opt-label">{}</td>'
'</tr>'.format(value, label))
html.append('</tbody></table>')
return ''.join(html)
def _attendee_fields_and_query(full, query):
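    # Pick the serialization field spec for the requested detail level and add
    # the eager-loading options it needs, so building the response does not
    # issue one query per attendee relationship.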
if full:
fields = AttendeeLookup.fields_full
query = query.options(
subqueryload(Attendee.dept_memberships),
subqueryload(Attendee.assigned_depts),
subqueryload(Attendee.food_restrictions),
subqueryload(Attendee.shifts).subqueryload(Shift.job))
else:
fields = AttendeeLookup.fields
query = query.options(subqueryload(Attendee.dept_memberships))
return (fields, query)
def _parse_datetime(d):
if isinstance(d, six.string_types) and d.strip().lower() == 'now':
d = datetime.now(pytz.UTC)
else:
d = dateparser.parse(d)
try:
d = d.astimezone(pytz.UTC) # aware object can be in any timezone
except ValueError:
d = c.EVENT_TIMEZONE.localize(d) # naive assumed to be event timezone
return d
def _parse_if_datetime(key, val):
# This should be in the UTCDateTime and Date classes, but they're not defined in this app
if hasattr(getattr(Attendee, key), 'type') and (
isinstance(getattr(Attendee, key).type, UTCDateTime) or isinstance(getattr(Attendee, key).type, Date)):
return _parse_datetime(val)
return val
def _parse_if_boolean(key, val):
if hasattr(getattr(Attendee, key), 'type') and isinstance(getattr(Attendee, key).type, Boolean):
if isinstance(val, six.string_types):
return val.strip().lower() not in ('f', 'false', 'n', 'no', '0')
else:
return bool(val)
return val
def auth_by_token(required_access):
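    # Authenticate the request via its X-Auth-Token header against stored
    # ApiToken records; returns an (http_status, message) tuple on failure
    # or None when access is granted.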
token = cherrypy.request.headers.get('X-Auth-Token', None)
if not token:
return (401, 'Missing X-Auth-Token header')
try:
token = uuid.UUID(token)
except ValueError as ex:
return (403, 'Invalid auth token, {}: {}'.format(ex, token))
with Session() as session:
api_token = session.query(ApiToken).filter_by(token=token).first()
if not api_token:
return (403, 'Auth token not recognized: {}'.format(token))
if api_token.revoked_time:
return (403, 'Revoked auth token: {}'.format(token))
for access_level in required_access:
if not getattr(api_token, access_level, None):
return (403, 'Insufficient access for auth token: {}'.format(token))
cherrypy.session['account_id'] = api_token.admin_account_id
return None
def auth_by_session(required_access):
try:
check_csrf()
except CSRFException:
return (403, 'Your CSRF token is invalid. Please go back and try again.')
admin_account_id = cherrypy.session.get('account_id')
if not admin_account_id:
return (403, 'Missing admin account in session')
with Session() as session:
admin_account = session.query(AdminAccount).filter_by(id=admin_account_id).first()
if not admin_account:
return (403, 'Invalid admin account in session')
for access_level in required_access:
if not getattr(admin_account, access_level, None):
return (403, 'Insufficient access for admin account')
return None
def api_auth(*required_access):
required_access = set(required_access)
def _decorator(fn):
inner_func = unwrap(fn)
if getattr(inner_func, 'required_access', None) is not None:
return fn
else:
inner_func.required_access = required_access
@wraps(fn)
def _with_api_auth(*args, **kwargs):
error = None
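            # Try token-based auth first, then session-based auth; keep the
            # first failure so the most relevant error is raised if neither
            # grants access.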
for auth in [auth_by_token, auth_by_session]:
result = auth(required_access)
error = error or result
if not result:
return fn(*args, **kwargs)
raise HTTPError(*error)
return _with_api_auth
return _decorator
class all_api_auth:
def __init__(self, *required_access):
self.required_access = required_access
def __call__(self, cls):
for name, fn in cls.__dict__.items():
if hasattr(fn, '__call__'):
setattr(cls, name, api_auth(*self.required_access)(fn))
return cls
@all_api_auth('api_read')
class GuestLookup:
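    # Field spec handed to GuestGroup.to_dict(): True includes an attribute
    # as-is, a nested dict selects fields on the related model.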
fields = {
'group_id': True,
'group_type': True,
'info': {
'status': True,
'poc_phone': True
},
'bio': {
'status': True,
'desc': True,
'website': True,
'facebook': True,
'twitter': True,
'other_social_media': True,
'teaser_song_url': True,
'pic_url': True
},
'interview': {
'will_interview': True,
'email': True,
'direct_contact': True
},
'group': {
'name': True,
'website': True,
'description': True
}
}
def types(self):
return c.GROUP_TYPE_VARS
def list(self, type=None):
"""
Returns a list of Guests.
Optionally, 'type' may be passed to limit the results to a specific
guest type. For a full list of guest types, call the "guest.types"
method.
"""
with Session() as session:
if type and type.upper() in c.GROUP_TYPE_VARS:
query = session.query(GuestGroup).filter_by(group_type=getattr(c, type.upper()))
else:
query = session.query(GuestGroup)
return [guest.to_dict(self.fields) for guest in query]
@all_api_auth('api_read')
class MivsLookup:
fields = {
'name': True,
'address': True,
'website': True,
'twitter': True,
'facebook': True,
'status_label': True,
'staff_notes': True,
'group': {
'name': True,
},
'developers': {
'full_name': True,
'first_name': True,
'last_name': True,
'email': True,
'cellphone': True,
},
}
def statuses(self):
return c.MIVS_STUDIO_STATUS_VARS
def list(self, status=False):
"""
Returns a list of MIVS studios and their developers.
Optionally, 'status' may be passed to limit the results to MIVS
studios with a specific status. Use 'confirmed' to get MIVS teams
who are attending the event.
For a full list of statuses, call the "mivs.statuses" method.
"""
with Session() as session:
if status and status.upper() in c.MIVS_STUDIO_STATUS_VARS:
query = session.query(IndieStudio).filter_by(status=getattr(c, status.upper()))
else:
query = session.query(IndieStudio)
return [mivs.to_dict(self.fields) for mivs in query]
@all_api_auth('api_read')
class AttendeeLookup:
fields = {
'full_name': True,
'first_name': True,
'last_name': True,
'legal_name': True,
'email': True,
'zip_code': True,
'cellphone': True,
'ec_name': True,
'ec_phone': True,
'checked_in': True,
'badge_num': True,
'badge_printed_name': True,
'badge_status_label': True,
'badge_type_label': True,
'amount_unpaid': True,
'donation_tier': True,
'donation_tier_label': True,
'donation_tier_paid': True,
'staffing': True,
'is_dept_head': True,
'ribbon_labels': True,
'public_id': True,
}
fields_full = dict(fields, **{
'assigned_depts_labels': True,
'weighted_hours': True,
'worked_hours': True,
'food_restrictions': {
'sandwich_pref_labels': True,
'standard_labels': True,
'freeform': True
},
'shifts': {
'worked': True,
'worked_label': True,
'job': [
'type_label', 'department_name', 'name', 'description',
'start_time', 'end_time', 'extra15', 'weight'
]
},
'group': {
'name': True,
},
})
def lookup(self, badge_num, full=False):
"""
Returns a single attendee by badge number.
Takes the badge number as the first parameter.
Optionally, "full" may be passed as the second parameter to return the
complete attendee record, including departments, shifts, and food
restrictions.
"""
with Session() as session:
attendee_query = session.query(Attendee).filter_by(badge_num=badge_num)
fields, attendee_query = _attendee_fields_and_query(full, attendee_query)
attendee = attendee_query.first()
if attendee:
return attendee.to_dict(fields)
else:
raise HTTPError(404, 'No attendee found with badge #{}'.format(badge_num))
def search(self, query, full=False):
"""
Searches for attendees using a freeform text query. Returns all
matching attendees using the same search algorithm as the main
attendee search box.
Takes the search query as the first parameter.
Optionally, "full" may be passed as the second parameter to return the
complete attendee record, including departments, shifts, and food
restrictions.
"""
with Session() as session:
attendee_query = session.search(query)
fields, attendee_query = _attendee_fields_and_query(full, attendee_query)
return [a.to_dict(fields) for a in attendee_query.limit(100)]
@api_auth('api_update')
def update(self, **kwargs):
"""
Updates an existing attendee record. "id" parameter is required and
sets the attendee to be updated. All other fields are taken as changes
to the attendee.
Returns the updated attendee.
"""
        if 'id' not in kwargs:
            raise HTTPError(400, 'You must provide the id of the attendee.')
with Session() as session:
            attendee = session.query(Attendee).filter(Attendee.id == kwargs['id']).first()
            if not attendee:
                raise HTTPError(404, 'Attendee {} not found.'.format(kwargs['id']))
for key, val in kwargs.items():
if not hasattr(Attendee, key):
return HTTPError(400, 'Attendee has no field {}'.format(key))
setattr(attendee, key, val)
session.add(attendee)
session.commit()
return attendee.to_dict(self.fields)
def login(self, first_name, last_name, email, zip_code):
"""
        Does a lookup similar to the login screen on the volunteer checklist pages.
"""
        # This code is largely copied from above, with different fields
with Session() as session:
attendee_query = session.query(Attendee).filter(Attendee.first_name.ilike(first_name),
Attendee.last_name.ilike(last_name),
Attendee.email.ilike(email),
Attendee.zip_code.ilike(zip_code))
fields, attendee_query = _attendee_fields_and_query(False, attendee_query)
try:
attendee = attendee_query.one()
except MultipleResultsFound:
raise HTTPError(404, 'found more than one attendee with matching information?')
except NoResultFound:
raise HTTPError(404, 'No attendee found with matching information')
return attendee.to_dict(fields)
def export(self, query, full=False):
"""
Searches for attendees by either email, "first last" name, or
"first last <email>" combinations.
`query` should be a comma or newline separated list of email/name
queries.
Example:
<pre>Merrium Webster, [email protected], John Doe <[email protected]></pre>
Results are returned in the format expected by
<a href="../import/staff">the staff importer</a>.
"""
_re_name_email = re.compile(r'^\s*(.*?)\s*<\s*(.*?@.*?)\s*>\s*$')
_re_sep = re.compile(r'[\n,]')
_re_whitespace = re.compile(r'\s+')
queries = [s.strip() for s in _re_sep.split(normalize_newlines(query)) if s.strip()]
names = dict()
emails = dict()
names_and_emails = dict()
ids = set()
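        # Classify each query term: "Name <email>" pairs, bare email addresses,
        # UUIDs and bare "first last" names are each matched with a different
        # filter against the Attendee table below.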
for q in queries:
if '@' in q:
match = _re_name_email.match(q)
if match:
name = match.group(1)
email = Attendee.normalize_email(match.group(2))
if name:
first, last = (_re_whitespace.split(name.lower(), 1) + [''])[0:2]
names_and_emails[(first, last, email)] = q
else:
emails[email] = q
else:
emails[Attendee.normalize_email(q)] = q
elif q:
try:
ids.add(str(uuid.UUID(q)))
except Exception:
first, last = (_re_whitespace.split(q.lower(), 1) + [''])[0:2]
names[(first, last)] = q
with Session() as session:
if full:
options = [
subqueryload(Attendee.dept_memberships).subqueryload(DeptMembership.department),
subqueryload(Attendee.dept_membership_requests).subqueryload(DeptMembershipRequest.department)]
else:
options = []
email_attendees = []
if emails:
email_attendees = session.query(Attendee).filter(Attendee.normalized_email.in_(list(emails.keys()))) \
.options(*options).order_by(Attendee.email, Attendee.id).all()
known_emails = set(a.normalized_email for a in email_attendees)
unknown_emails = sorted([raw for normalized, raw in emails.items() if normalized not in known_emails])
name_attendees = []
if names:
filters = [
and_(func.lower(Attendee.first_name) == first, func.lower(Attendee.last_name) == last)
for first, last in names.keys()]
name_attendees = session.query(Attendee).filter(or_(*filters)) \
.options(*options).order_by(Attendee.email, Attendee.id).all()
known_names = set((a.first_name.lower(), a.last_name.lower()) for a in name_attendees)
unknown_names = sorted([raw for normalized, raw in names.items() if normalized not in known_names])
name_and_email_attendees = []
if names_and_emails:
filters = [
and_(
func.lower(Attendee.first_name) == first,
func.lower(Attendee.last_name) == last,
Attendee.normalized_email == email)
for first, last, email in names_and_emails.keys()]
name_and_email_attendees = session.query(Attendee).filter(or_(*filters)) \
.options(*options).order_by(Attendee.email, Attendee.id).all()
known_names_and_emails = set(
(a.first_name.lower(), a.last_name.lower(), a.normalized_email) for a in name_and_email_attendees)
unknown_names_and_emails = sorted([
raw for normalized, raw in names_and_emails.items() if normalized not in known_names_and_emails])
id_attendees = []
if ids:
id_attendees = session.query(Attendee).filter(Attendee.id.in_(ids)) \
.options(*options).order_by(Attendee.email, Attendee.id).all()
known_ids = set(str(a.id) for a in id_attendees)
unknown_ids = sorted([i for i in ids if i not in known_ids])
seen = set()
all_attendees = [
a for a in (id_attendees + email_attendees + name_attendees + name_and_email_attendees)
if a.id not in seen and not seen.add(a.id)]
fields = [
'first_name',
'last_name',
'birthdate',
'email',
'zip_code',
'birthdate',
'international',
'ec_name',
'ec_phone',
'cellphone',
'badge_printed_name',
'found_how',
'comments',
'admin_notes',
'all_years',
'badge_status',
'badge_status_label',
]
if full:
fields.extend(['shirt'])
attendees = []
for a in all_attendees:
d = a.to_dict(fields)
if full:
assigned_depts = {}
checklist_admin_depts = {}
dept_head_depts = {}
poc_depts = {}
for membership in a.dept_memberships:
assigned_depts[membership.department_id] = membership.department.name
if membership.is_checklist_admin:
checklist_admin_depts[membership.department_id] = membership.department.name
if membership.is_dept_head:
dept_head_depts[membership.department_id] = membership.department.name
if membership.is_poc:
poc_depts[membership.department_id] = membership.department.name
d.update({
'assigned_depts': assigned_depts,
'checklist_admin_depts': checklist_admin_depts,
'dept_head_depts': dept_head_depts,
'poc_depts': poc_depts,
'requested_depts': {
(m.department_id if m.department_id else 'All'):
(m.department.name if m.department_id else 'Anywhere')
for m in a.dept_membership_requests},
})
attendees.append(d)
return {
'unknown_ids': unknown_ids,
'unknown_emails': unknown_emails,
'unknown_names': unknown_names,
'unknown_names_and_emails': unknown_names_and_emails,
'attendees': attendees,
}
@api_auth('api_create')
def create(self, first_name, last_name, email, params):
"""
Create an attendee with at least a first name, last name, and email. Prevents duplicate attendees.
`params` should be a dictionary with column name: value to set other values, or a falsey value.
Use labels for Choice and MultiChoice columns, and a string like "no" or "yes" for Boolean columns.
Date and DateTime columns should be parsed correctly as long as they follow a standard format.
Example `params` dictionary for setting extra parameters:
<pre>{"placeholder": "yes", "legal_name": "First Last", "cellphone": "5555555555"}</pre>
"""
with Session() as session:
            attendee_query = session.query(Attendee).filter(Attendee.first_name.ilike(first_name),
                                                            Attendee.last_name.ilike(last_name),
                                                            Attendee.email.ilike(email))
if attendee_query.first():
raise HTTPError(400, 'An attendee with this name and email address already exists')
attendee = Attendee(first_name=first_name, last_name=last_name, email=email)
if params:
for key, val in params.items():
params[key] = _parse_if_datetime(key, val)
params[key] = _parse_if_boolean(key, val)
attendee.apply(params, restricted=False)
session.add(attendee)
message = check(attendee)
if message:
session.rollback()
raise HTTPError(400, message)
# Duplicates functionality on the admin form that makes placeholder badges need not pay
# Staff (not volunteers) also almost never need to pay by default
if (attendee.placeholder or
attendee.staffing and c.VOLUNTEER_RIBBON not in attendee.ribbon_ints) and 'paid' not in params:
attendee.paid = c.NEED_NOT_PAY
return attendee.id
@api_auth('api_update')
def update(self, id, params):
"""
Update an attendee using their unique ID, returned by our lookup functions.
`params` should be a dictionary with column name: value to update values.
Use labels for Choice and MultiChoice columns, and a string like "no" or "yes" for Boolean columns.
Date and DateTime columns should be parsed correctly as long as they follow a standard format.
Example:
<pre>{"first_name": "First", "paid": "doesn't need to", "ribbon": "Volunteer, Panelist"}</pre>
"""
with Session() as session:
attendee = session.attendee(id, allow_invalid=True)
if not attendee:
raise HTTPError(404, 'No attendee found with this ID')
if not params:
raise HTTPError(400, 'Please provide parameters to update')
for key, val in params.items():
params[key] = _parse_if_datetime(key, val)
params[key] = _parse_if_boolean(key, val)
attendee.apply(params, restricted=False)
message = check(attendee)
if message:
session.rollback()
raise HTTPError(400, message)
# Staff (not volunteers) also almost never need to pay by default
if attendee.staffing and not attendee.orig_value_of('staffing') \
and c.VOLUNTEER_RIBBON not in attendee.ribbon_ints and 'paid' not in params:
attendee.paid = c.NEED_NOT_PAY
return attendee.id
@all_api_auth('api_update')
class JobLookup:
fields = {
'name': True,
'description': True,
'department_name': True,
'start_time': True,
'end_time': True,
'duration': True,
'shifts': {
'worked': True,
'worked_label': True,
'attendee': {
'badge_num': True,
'full_name': True,
'first_name': True,
'last_name': True,
'email': True,
'cellphone': True,
'badge_printed_name': True
}
}
}
@department_id_adapter
@api_auth('api_read')
def lookup(self, department_id, start_time=None, end_time=None):
"""
Returns a list of all shifts for the given department.
Takes the department id as the first parameter. For a list of all
department ids call the "dept.list" method.
Optionally, takes a "start_time" and "end_time" to constrain the
results to a given date range. Dates may be given in any format
supported by the
<a href="http://dateutil.readthedocs.io/en/stable/parser.html">
dateutil parser</a>, plus the string "now".
Unless otherwise specified, "start_time" and "end_time" are assumed
to be in the local timezone of the event.
"""
with Session() as session:
query = session.query(Job).filter_by(department_id=department_id)
if start_time:
start_time = _parse_datetime(start_time)
query = query.filter(Job.start_time >= start_time)
if end_time:
end_time = _parse_datetime(end_time)
query = query.filter(Job.start_time <= end_time)
query = query.options(
subqueryload(Job.department),
subqueryload(Job.shifts).subqueryload(Shift.attendee))
return [job.to_dict(self.fields) for job in query]
def assign(self, job_id, attendee_id):
"""
Assigns a shift for the given job to the given attendee.
Takes the job id and attendee id as parameters.
"""
with Session() as session:
message = session.assign(attendee_id, job_id)
if message:
raise HTTPError(400, message)
else:
session.commit()
return session.job(job_id).to_dict(self.fields)
def unassign(self, shift_id):
"""
Unassigns whomever is working the given shift.
Takes the shift id as the only parameter.
"""
with Session() as session:
shift = session.query(Shift).filter_by(id=shift_id).first()
if not shift:
raise HTTPError(404, 'Shift id not found:{}'.format(shift_id))
session.delete(shift)
session.commit()
return session.job(shift.job_id).to_dict(self.fields)
@docstring_format(
_format_opts(c.WORKED_STATUS_OPTS),
_format_opts(c.RATING_OPTS))
def set_worked(self, shift_id, status=c.SHIFT_WORKED, rating=c.UNRATED, comment=''):
"""
Sets the given shift status as worked or not worked.
Takes the shift id as the first parameter.
Optionally takes the shift status, rating, and a comment required to
explain either poor or excellent performance.
<h6>Valid status values</h6>
{}
<h6>Valid rating values</h6>
{}
"""
try:
status = int(status)
assert c.WORKED_STATUS[status] is not None
except Exception:
raise HTTPError(400, 'Invalid status: {}'.format(status))
try:
rating = int(rating)
assert c.RATINGS[rating] is not None
except Exception:
raise HTTPError(400, 'Invalid rating: {}'.format(rating))
if rating in (c.RATED_BAD, c.RATED_GREAT) and not comment:
raise HTTPError(400, 'You must leave a comment explaining why the staffer was rated as: {}'.format(
c.RATINGS[rating]))
with Session() as session:
shift = session.query(Shift).filter_by(id=shift_id).first()
if not shift:
raise HTTPError(404, 'Shift id not found:{}'.format(shift_id))
shift.worked = status
shift.rating = rating
shift.comment = comment
session.commit()
return session.job(shift.job_id).to_dict(self.fields)
@all_api_auth('api_read')
class DepartmentLookup:
def list(self):
"""
Returns a list of department ids and names.
"""
return c.DEPARTMENTS
@department_id_adapter
@api_auth('api_read')
def jobs(self, department_id):
"""
Returns a list of all roles and jobs for the given department.
Takes the department id as the first parameter. For a list of all
department ids call the "dept.list" method.
"""
with Session() as session:
department = session.query(Department).filter_by(id=department_id).first()
if not department:
raise HTTPError(404, 'Department id not found: {}'.format(department_id))
return department.to_dict({
'id': True,
'name': True,
'description': True,
'solicits_volunteers': True,
'is_shiftless': True,
'is_setup_approval_exempt': True,
'is_teardown_approval_exempt': True,
'max_consecutive_minutes': True,
'jobs': {
'id': True,
'type': True,
'name': True,
'description': True,
'start_time': True,
'duration': True,
'weight': True,
'slots': True,
'extra15': True,
'visibility': True,
'required_roles': {'id': True},
},
'dept_roles': {
'id': True,
'name': True,
'description': True,
},
})
@all_api_auth('api_read')
class ConfigLookup:
fields = [
'EVENT_NAME',
'ORGANIZATION_NAME',
'EVENT_YEAR',
'EPOCH',
'ESCHATON',
'EVENT_VENUE',
'EVENT_VENUE_ADDRESS',
'EVENT_TIMEZONE',
'AT_THE_CON',
'POST_CON',
'URL_BASE',
'URL_ROOT',
'PATH',
'BADGE_PRICE',
'BADGES_SOLD',
'REMAINING_BADGES',
]
def info(self):
"""
Returns a list of all available configuration settings.
"""
output = {field: getattr(c, field) for field in self.fields}
# This is to allow backward compatibility with pre 1.0 code
output['YEAR'] = c.EVENT_YEAR
output['API_VERSION'] = __version__
output['EVENT_TIMEZONE'] = str(output['EVENT_TIMEZONE'])
return output
def lookup(self, field):
"""
Returns the given configuration setting. Takes the setting
name as a single argument. For a list of available settings,
call the "config.info" method.
"""
if field.upper() in self.fields:
return getattr(c, field.upper())
else:
raise HTTPError(404, 'Config field not found: {}'.format(field))
@all_api_auth('api_read')
class HotelLookup:
def eligible_attendees(self):
"""
Returns a list of hotel eligible attendees
"""
with Session() as session:
attendees = session.query(Attendee.id).filter(Attendee.hotel_eligible == True).all()
return [x.id for x in attendees]
@api_auth('api_update')
def update_room(self, id=None, **kwargs):
"""
Create or update a hotel room. If the id of an existing room is
supplied then it will attempt to update an existing room.
Possible attributes are notes, message, locked_in, nights, and created.
Returns the created room, with its id.
"""
with Session() as session:
if id:
room = session.query(Room).filter(Room.id == id).one_or_none()
if not room:
                    raise HTTPError(404, "Could not locate room {}".format(id))
else:
room = Room()
for attr in ['notes', 'message', 'locked_in', 'nights', 'created']:
if attr in kwargs:
setattr(room, attr, kwargs[attr])
session.add(room)
session.commit()
return room.to_dict()
@api_auth('api_update')
def update_request(self, id=None, **kwargs):
"""
Create or update a hotel request. If the id is supplied then it will
attempt to update the given request.
Possible attributes are attendee_id, nights, wanted_roommates, unwanted_roommates, special_needs, and approved.
Returns the created or updated request.
"""
with Session() as session:
if id:
hotel_request = session.query(HotelRequests).filter(HotelRequests.id == id).one_or_none()
if not hotel_request:
                    raise HTTPError(404, "Could not locate request {}".format(id))
else:
hotel_request = HotelRequests()
for attr in ['attendee_id', 'nights', 'wanted_roommates', 'unwanted_roommates', 'special_needs', 'approved']:
if attr in kwargs:
setattr(hotel_request, attr, kwargs[attr])
session.add(hotel_request)
session.commit()
return hotel_request.to_dict()
@api_auth('api_update')
def update_assignment(self, id=None, **kwargs):
"""
Create or update a hotel room assignment. If the id is supplied then it will
attempt to update the given request. Otherwise a new one is created.
Possible attributes are room_id, and attendee_id.
Returns the created or updated assignment.
"""
with Session() as session:
if id:
assignment = session.query(RoomAssignment).filter(RoomAssignment.id == id).one_or_none()
if not assignment:
                    raise HTTPError(404, "Could not locate room assignment {}".format(id))
else:
assignment = RoomAssignment()
for attr in ['room_id', 'attendee_id']:
if attr in kwargs:
setattr(assignment, attr, kwargs[attr])
session.add(assignment)
session.commit()
return assignment.to_dict()
def nights(self):
"""
Returns the available room nights.
"""
return {
"core_nights": c.CORE_NIGHTS,
"setup_nights": c.SETUP_NIGHTS,
"teardown_nights": c.TEARDOWN_NIGHTS,
"dates": c.NIGHT_DATES,
"order": c.NIGHT_DISPLAY_ORDER,
"names": c.NIGHT_NAMES
}
@all_api_auth('api_read')
class ScheduleLookup:
def schedule(self):
"""
Returns the entire schedule in machine parseable format.
"""
with Session() as session:
return [
{
'name': event.name,
'location': event.location_label,
'start': event.start_time_local.strftime('%I%p %a').lstrip('0'),
'end': event.end_time_local.strftime('%I%p %a').lstrip('0'),
'start_unix': int(mktime(event.start_time.utctimetuple())),
'end_unix': int(mktime(event.end_time.utctimetuple())),
'duration': event.minutes,
'description': event.description,
'panelists': [panelist.attendee.full_name for panelist in event.assigned_panelists]
}
for event in sorted(session.query(Event).all(), key=lambda e: [e.start_time, e.location_label])
]
@all_api_auth('api_read')
class BarcodeLookup:
def lookup_attendee_from_barcode(self, barcode_value, full=False):
"""
Returns a single attendee using the barcode value from their badge.
Takes the (possibly encrypted) barcode value as the first parameter.
Optionally, "full" may be passed as the second parameter to return the
complete attendee record, including departments, shifts, and food
restrictions.
"""
badge_num = -1
try:
result = get_badge_num_from_barcode(barcode_value)
badge_num = result['badge_num']
except Exception as e:
raise HTTPError(500, "Couldn't look up barcode value: " + str(e))
# Note: A decrypted barcode can yield a valid badge num,
# but that badge num may not be assigned to an attendee.
with Session() as session:
query = session.query(Attendee).filter_by(badge_num=badge_num)
fields, query = _attendee_fields_and_query(full, query)
attendee = query.first()
if attendee:
return attendee.to_dict(fields)
else:
raise HTTPError(404, 'Valid barcode, but no attendee found with Badge #{}'.format(badge_num))
def lookup_badge_number_from_barcode(self, barcode_value):
"""
Returns a badge number using the barcode value from the given badge.
Takes the (possibly encrypted) barcode value as a single parameter.
"""
try:
result = get_badge_num_from_barcode(barcode_value)
return {'badge_num': result['badge_num']}
except Exception as e:
raise HTTPError(500, "Couldn't look up barcode value: " + str(e))
if c.API_ENABLED:
register_jsonrpc(AttendeeLookup(), 'attendee')
register_jsonrpc(JobLookup(), 'shifts')
register_jsonrpc(DepartmentLookup(), 'dept')
register_jsonrpc(ConfigLookup(), 'config')
register_jsonrpc(BarcodeLookup(), 'barcode')
register_jsonrpc(GuestLookup(), 'guest')
register_jsonrpc(MivsLookup(), 'mivs')
register_jsonrpc(HotelLookup(), 'hotel')
register_jsonrpc(ScheduleLookup(), 'schedule')
| agpl-3.0 | -382,661,947,119,875,000 | 36.477495 | 121 | 0.552425 | false |
gingerswede/ITSecCardGame | src/Controller/Master.py | 1 | 3311 | '''
IDE: Eclipse (PyDev)
Python version: 2.7
Operating system: Windows 8.1
@author: Emil Carlsson
@copyright: 2015 Emil Carlsson
@license: This program is distributed under the terms of the GNU General Public License
'''
import Tkinter as tk, pygame
from Tkconstants import NW, BOTH
from View import GlobalFunc
from Controller import Menu as MenuController, Game as GameController
from Model import Player, Settings
from Model.Sounds import Sounds as Sound
class MasterController(object):
VIEW_AREA = "viewarea"
BACKGROUND_COLOR = "chartreuse4"
BACKGROUND_COLOR_CARD = "green4"
DECK_COLOR = "purple4"
RED = "red4"
__viewArea = None
__menuArea = None
__menuController = None
__gameController = None
__root = None
__player = None
__mixer = None
__sounds = None
__settings = None
def __init__(self, root):
self.__player = Player.Player()
self.__root = root
self.__settings = Settings.Settings()
self.__menuArea = tk.Frame(root, width=root.winfo_screenwidth())
self.__menuArea.pack(anchor=NW)
background = tk.Frame(root, width=root.winfo_screenwidth(), height=root.winfo_screenheight(), background=self.BACKGROUND_COLOR)
background.pack(fill=BOTH, expand=True, pady=5, padx=5)
self.__viewArea = tk.Frame(background, background=self.BACKGROUND_COLOR)
self.__viewArea.pack(pady=10, padx=10, fill=BOTH, expand=True)
self.__menuController = MenuController.MenuController(self.__viewArea, self)
self.__gameController = GameController.GameController(self.__viewArea, self.__player, self)
self.__menuController.DisplayBasicMenu(self.__menuArea)
        root.bind('<Escape>', lambda e: self.OpenMenu())
        root.bind('<x>', lambda e: self.CloseApplication())
self.__sounds = Sound()
self.__mixer = pygame.mixer
self.__mixer.init()
self.OpenMenu()
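    # Each navigation method below stops any playing audio and, when music is
    # enabled in the settings, starts the looping track that matches the new
    # screen before delegating to the menu or game controller.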
def ShowCredits(self, *args, **kwargs):
self.__mixer.stop()
if self.__settings.Music:
sound = self.__mixer.Sound(self.__sounds.EndCredit)
sound.play(loops=-1)
self.__menuController.ShowCredits()
def ShowSettings(self, *args, **kwargs):
self.__mixer.stop()
if self.__settings.Music:
sound = self.__mixer.Sound(self.__sounds.MenuMusic)
sound.play(loops=-1)
self.__menuController.ShowSettings()
def OpenMenu(self):
self.__mixer.stop()
if self.__settings.Music:
sound = self.__mixer.Sound(self.__sounds.MenuMusic)
sound.play(loops=-1)
self.__menuController.OpenMainMenu(self.__viewArea)
def StartNewGame(self):
self.__mixer.stop()
if self.__settings.Music:
sound = self.__mixer.Sound(self.__sounds.GamePlayMusic)
sound.play(loops=-1)
self.__gameController.StartNewGame()
def CloseApplication(self):
GlobalFunc.CloseWindow(self.__root)
@property
def Settings(self):
return self.__settings | gpl-3.0 | -7,776,639,673,486,931,000 | 29.385321 | 135 | 0.597101 | false |
codoo/vertical-exchange | distributed_db/__openerp__.py | 1 | 1429 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Lucas Huber, Copyright CoĐoo Project
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Distributed database API',
'version': '0.X',
'category': 'Exchange',
'summary': 'Basic framework API for distributed ledgers',
'author': 'Lucas Huber, CoĐoo Project',
'license': 'AGPL-3',
'website': 'https://github.com/codoo/vertical-exchange',
# 'description': ''
'depends': ['base_exchange'],
'data': [
'security/ir.model.access.csv',
'distributed_db_view.xml',
# 'distributed_db_workflow.xml',
],
"installable": True,
}
| agpl-3.0 | 3,288,404,125,434,701,300 | 36.552632 | 78 | 0.592852 | false |
TUT-ARG/sed_vis | tests/visualization_test.py | 1 | 4772 | #!/usr/bin/env python
import sed_vis
import dcase_util
import os
mode = 'probability'
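# Selects which scenario below is exercised; see the if/elif chain for the
# available values: 'dcase2016', 'publication', 'sync', 'multiple', 'probability'.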
current_path = os.path.dirname(os.path.realpath(__file__))
if mode == 'dcase2016':
audio_container = dcase_util.containers.AudioContainer().load(
os.path.join(current_path, 'data', 'a001.wav')
)
event_lists = {
'reference': dcase_util.containers.MetaDataContainer().load(
os.path.join(current_path, 'data', 'a001.ann')
)
}
vis = sed_vis.visualization.EventListVisualizer(
event_lists=event_lists,
event_list_order=['reference'],
audio_signal=audio_container.data,
sampling_rate=audio_container.fs,
spec_cmap='jet',
spec_interpolation='bicubic',
spec_win_size=1024,
spec_hop_size=1024/2,
spec_fft_size=1024,
publication_mode=True
)
vis.show()
elif mode == 'publication':
# Example how to create plots for publications, use "save the figure" button and
# select svg format. Open figure in e.g. inkscape and edit to your liking.
audio_container = dcase_util.containers.AudioContainer().load(
os.path.join(current_path, 'data', 'a001.wav')
)
event_lists = {
'reference': dcase_util.containers.MetaDataContainer().load(
os.path.join(current_path, 'data', 'a001.ann')
),
'full': dcase_util.containers.MetaDataContainer().load(
os.path.join(current_path, 'data', 'a001_full.ann')
),
'estimated': dcase_util.containers.MetaDataContainer().load(
os.path.join(current_path, 'data', 'a001_system_output.ann')
)
}
vis = sed_vis.visualization.EventListVisualizer(
event_lists=event_lists,
event_list_order=['reference', 'full', 'estimated'],
audio_signal=audio_container.data,
sampling_rate=audio_container.fs,
spec_cmap='jet',
spec_interpolation='bicubic',
spec_win_size=1024,
spec_hop_size=1024/8,
spec_fft_size=1024,
publication_mode=True
)
vis.show()
elif mode == 'sync':
# Test for audio and visual synchronization during the playback.
audio_container = dcase_util.containers.AudioContainer().load(
os.path.join(current_path, 'data', 'sync', 'sin_silence.wav')
)
event_lists = {
'reference': dcase_util.containers.MetaDataContainer().load(
os.path.join(current_path, 'data', 'sync', 'sin_silence.txt')
)
}
vis = sed_vis.visualization.EventListVisualizer(
event_lists=event_lists,
audio_signal=audio_container.data,
sampling_rate=audio_container.fs,
mode='time_domain'
)
vis.show()
elif mode == 'multiple':
# Test visualization of multiple system outputs
audio_container = dcase_util.containers.AudioContainer().load(
os.path.join(current_path, 'data', 'a001.wav')
)
event_lists = {
'reference': dcase_util.containers.MetaDataContainer().load(
os.path.join(current_path, 'data', 'a001.ann')
),
'estimated1': dcase_util.containers.MetaDataContainer().load(
os.path.join(current_path, 'data', 'a001_system_output.ann')
),
'estimated2': dcase_util.containers.MetaDataContainer().load(
os.path.join(current_path, 'data', 'a001_system_output_2.ann')
)
}
vis = sed_vis.visualization.EventListVisualizer(
event_lists=event_lists,
event_list_order=['reference', 'estimated1', 'estimated2'],
audio_signal=audio_container.data,
sampling_rate=audio_container.fs,
spec_cmap='jet',
spec_interpolation='bicubic',
spec_win_size=1024,
spec_hop_size=1024/8,
spec_fft_size=1024,
publication_mode=True
)
vis.show()
elif mode == 'probability':
audio_container = dcase_util.containers.AudioContainer().load(
os.path.join(current_path, 'data', 'a001.wav')
)
event_lists = {
'reference': dcase_util.containers.MetaDataContainer().load(
os.path.join(current_path, 'data', 'a001.ann')
),
'estimated': dcase_util.containers.MetaDataContainer().load(
os.path.join(current_path, 'data', 'a001_system_output_prob.csv')
)
}
vis = sed_vis.visualization.EventListVisualizer(
event_lists=event_lists,
        event_list_order=['reference', 'estimated'],  # 'full', 'estimated'],
audio_signal=audio_container.data,
sampling_rate=audio_container.fs,
spec_cmap='jet',
spec_interpolation='bicubic',
spec_win_size=1024,
spec_hop_size=1024/8,
spec_fft_size=1024,
publication_mode=True
)
vis.show() | mit | -4,408,422,476,550,994,000 | 31.25 | 84 | 0.615046 | false |
mete0r/hypua2jamo | src/hypua2jamo/__init__.py | 1 | 2055 | # -*- coding: utf-8 -*-
# hypua2jamo: Convert Hanyang-PUA code to unicode Hangul Jamo
# Copyright (C) 2012 mete0r
#
# This file is part of hypua2jamo.
#
# hypua2jamo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# hypua2jamo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with hypua2jamo. If not, see <http://www.gnu.org/licenses/>.
import sys
import logging
__version__ = '0.6.dev0'
logger = logging.getLogger(__name__)
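# True when running on the Jython interpreter.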
jython = sys.platform.startswith('java')
if sys.version_info >= (3,):
unichr = chr
def translate(pua, composed=True):
''' Convert a unicode string with Hanyang-PUA codes
to a Syllable-Initial-Peak-Final encoded unicode string.
:param pua: a unicode string with Hanyang-PUA codes
:param composed: the result should be composed as possible (default True)
:return: Syllable-Initial-Peak-Final encoded unicode string
'''
from .encoder import PUAComposedEncoder
from .encoder import PUADecomposedEncoder
if composed:
JamoEncoder = PUAComposedEncoder
else:
JamoEncoder = PUADecomposedEncoder
encoder = JamoEncoder()
return encoder.encode(pua, final=True)
def codes2unicode(codes, composed=True):
''' Convert Hanyang-PUA code iterable to Syllable-Initial-Peak-Final
encoded unicode string.
:param codes:
an iterable of Hanyang-PUA code
:param composed:
the result should be composed as much as possible (default True)
:return: Syllable-Initial-Peak-Final encoded unicode string
'''
pua = u''.join(unichr(code) for code in codes)
return translate(pua, composed=composed)
| lgpl-3.0 | -3,895,820,284,967,063,600 | 31.619048 | 77 | 0.723601 | false |
reinforceio/tensorforce | tensorforce/environments/vizdoom.py | 1 | 5707 | # Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from collections import OrderedDict
import itertools
import numpy as np
from tensorforce.environments import Environment
class ViZDoom(Environment):
"""
[ViZDoom](https://github.com/mwydmuch/ViZDoom) environment adapter (specification key:
`vizdoom`).
May require:
```bash
sudo apt-get install g++ build-essential libsdl2-dev zlib1g-dev libmpg123-dev libjpeg-dev \
libsndfile1-dev nasm tar libbz2-dev libgtk2.0-dev make cmake git chrpath timidity \
libfluidsynth-dev libgme-dev libopenal-dev timidity libwildmidi-dev unzip libboost-all-dev \
liblua5.1-dev
pip3 install vizdoom
```
Args:
level (string): ViZDoom configuration file
(<span style="color:#C00000"><b>required</b></span>).
include_variables (bool): Whether to include game variables to state
(<span style="color:#00C000"><b>default</b></span>: false).
factored_action (bool): Whether to use factored action representation
(<span style="color:#00C000"><b>default</b></span>: false).
visualize (bool): Whether to visualize interaction
(<span style="color:#00C000"><b>default</b></span>: false).
frame_skip (int > 0): Number of times to repeat an action without observing
(<span style="color:#00C000"><b>default</b></span>: 12).
seed (int): Random seed
(<span style="color:#00C000"><b>default</b></span>: none).
"""
def __init__(
self, level, visualize=False, include_variables=False, factored_action=False,
frame_skip=12, seed=None
):
super().__init__()
from vizdoom import DoomGame, Mode, ScreenFormat, ScreenResolution
self.config_file = level
self.include_variables = include_variables
self.factored_action = factored_action
self.visualize = visualize
self.frame_skip = frame_skip
self.environment = DoomGame()
self.environment.load_config(self.config_file)
if self.visualize:
self.environment.set_window_visible(True)
self.environment.set_mode(Mode.ASYNC_PLAYER)
else:
self.environment.set_window_visible(False)
self.environment.set_mode(Mode.PLAYER)
# e.g. CRCGCB, RGB24, GRAY8
self.environment.set_screen_format(ScreenFormat.RGB24)
# e.g. RES_320X240, RES_640X480, RES_1920X1080
self.environment.set_screen_resolution(ScreenResolution.RES_640X480)
self.environment.set_depth_buffer_enabled(False)
self.environment.set_labels_buffer_enabled(False)
self.environment.set_automap_buffer_enabled(False)
if seed is not None:
            self.environment.set_seed(seed)
self.environment.init()
self.state_shape = (480, 640, 3)
self.num_variables = self.environment.get_available_game_variables_size()
self.num_buttons = self.environment.get_available_buttons_size()
self.available_actions = [
tuple(a) for a in itertools.product([0, 1], repeat=self.num_buttons)
]
def __str__(self):
return super().__str__() + '({})'.format(self.config_file)
def states(self):
if self.include_variables:
return OrderedDict(
screen=dict(type='float', shape=self.state_shape, min_value=0.0, max_value=1.0),
variables=dict(type='float', shape=self.num_variables)
)
else:
return dict(type='float', shape=self.state_shape, min_value=0.0, max_value=1.0)
def actions(self):
if self.factored_action:
return dict(type='bool', shape=self.num_buttons)
else:
return dict(type='int', shape=(), num_values=len(self.available_actions))
def close(self):
self.environment.close()
self.environment = None
def get_states(self):
state = self.environment.get_state()
screen = state.screen_buffer.astype(dtype=np.float32) / 255.0
if self.include_variables:
return OrderedDict(screen=screen, variables=state.game_variables)
else:
return screen
def reset(self):
self.environment.new_episode()
self.current_states = self.get_states()
return self.current_states
def execute(self, actions):
if self.factored_action:
action = np.where(actions, 1.0, 0.0)
else:
action = self.available_actions[actions]
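        # In visualize (ASYNC_PLAYER) mode, advance the game frame by frame so the window
        # keeps rendering; otherwise make_action repeats the action frame_skip times internally.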
if self.visualize:
self.environment.set_action(action)
reward = 0.0
for _ in range(self.frame_skip):
self.environment.advance_action()
reward += self.environment.get_last_reward()
else:
reward = self.environment.make_action(list(action), self.frame_skip)
terminal = self.environment.is_episode_finished()
if not terminal:
self.current_states = self.get_states()
return self.current_states, terminal, reward
| apache-2.0 | 7,290,622,197,792,052,000 | 38.358621 | 96 | 0.631856 | false |
codebhendi/alfred-bot | wiki.py | 1 | 1295 | import re
from urllib2 import Request, urlopen, URLError
import json
def handle(text, speaker):
# method to get the wiki summary
get_wiki(text, speaker)
def get_wiki(title, speaker):
# get the user voice input as string
# make a call to the Wikipedia API
request = Request('https://en.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&exintro=&explaintext=&titles='+title)
try:
response = urlopen(request)
data = json.load(response)
# Parse the JSON to just get the extract. Always get the first summary.
output = data["query"]["pages"]
final = output[output.keys()[0]]["extract"]
speaker.say(final)
except URLError, e:
speaker.say("Unable to reach dictionary API.")
def isValid(text):
    wiki = bool(re.search(r'\bWiki\b', text, re.IGNORECASE))
    # Add 'Wicky' because the STT engine recognizes it quite often
    wicky = bool(re.search(r'\bwicky\b', text, re.IGNORECASE))
    article = bool(re.search(r'\barticle\b', text, re.IGNORECASE))
if wicky or wiki or article:
return True
else:
return False
def check_command(data, speaker):
if "check" in data :
speaker.say("What would you like to learn about?")
return True
else :
return False | mit | 1,545,980,548,038,383,600 | 30.609756 | 134 | 0.656371 | false |
betatim/BlackBox | skopt/tests/test_utils.py | 2 | 7895 | import pytest
import tempfile
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
import numpy as np
from skopt import gp_minimize
from skopt import load
from skopt import dump
from skopt import expected_minimum
from skopt.benchmarks import bench1
from skopt.benchmarks import bench3
from skopt.learning import ExtraTreesRegressor
from skopt import Optimizer
from skopt import Space
from skopt.utils import point_asdict
from skopt.utils import point_aslist
from skopt.utils import dimensions_aslist
from skopt.utils import has_gradients
from skopt.utils import cook_estimator
from skopt.utils import normalize_dimensions
from skopt.utils import use_named_args
from skopt.space import Real, Integer, Categorical
def check_optimization_results_equality(res_1, res_2):
# Check if the results objects have the same keys
assert_equal(sorted(res_1.keys()), sorted(res_2.keys()))
# Shallow check of the main optimization results
assert_array_equal(res_1.x, res_2.x)
assert_array_equal(res_1.x_iters, res_2.x_iters)
assert_array_equal(res_1.fun, res_2.fun)
assert_array_equal(res_1.func_vals, res_2.func_vals)
@pytest.mark.fast_test
def test_dump_and_load():
res = gp_minimize(bench3,
[(-2.0, 2.0)],
x0=[0.],
acq_func="LCB",
n_calls=2,
n_random_starts=0,
random_state=1)
# Test normal dumping and loading
with tempfile.TemporaryFile() as f:
dump(res, f)
f.seek(0)
res_loaded = load(f)
check_optimization_results_equality(res, res_loaded)
assert_true("func" in res_loaded.specs["args"])
# Test dumping without objective function
with tempfile.TemporaryFile() as f:
dump(res, f, store_objective=False)
f.seek(0)
res_loaded = load(f)
check_optimization_results_equality(res, res_loaded)
assert_true(not ("func" in res_loaded.specs["args"]))
# Delete the objective function and dump the modified object
del res.specs["args"]["func"]
with tempfile.TemporaryFile() as f:
dump(res, f, store_objective=False)
f.seek(0)
res_loaded = load(f)
check_optimization_results_equality(res, res_loaded)
assert_true(not ("func" in res_loaded.specs["args"]))
@pytest.mark.fast_test
def test_dump_and_load_optimizer():
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer([(-2.0, 2.0)], base_estimator, n_random_starts=1,
acq_optimizer="sampling")
opt.run(bench1, n_iter=3)
with tempfile.TemporaryFile() as f:
dump(opt, f)
f.seek(0)
load(f)
@pytest.mark.fast_test
def test_expected_minimum():
res = gp_minimize(bench3,
[(-2.0, 2.0)],
x0=[0.],
noise=1e-8,
n_calls=8,
n_random_starts=3,
random_state=1)
x_min, f_min = expected_minimum(res, random_state=1)
x_min2, f_min2 = expected_minimum(res, random_state=1)
assert f_min <= res.fun # true since noise ~= 0.0
assert x_min == x_min2
assert f_min == f_min2
@pytest.mark.fast_test
def test_dict_list_space_representation():
"""
Tests whether the conversion of the dictionary and list representation
of a point from a search space works properly.
"""
chef_space = {
'Cooking time': (0, 1200), # in minutes
'Main ingredient': [
'cheese', 'cherimoya', 'chicken', 'chard', 'chocolate', 'chicory'
],
'Secondary ingredient': [
'love', 'passion', 'dedication'
],
'Cooking temperature': (-273.16, 10000.0) # in Celsius
}
opt = Optimizer(dimensions=dimensions_aslist(chef_space))
point = opt.ask()
# check if the back transformed point and original one are equivalent
assert_equal(
point,
point_aslist(chef_space, point_asdict(chef_space, point))
)
@pytest.mark.fast_test
@pytest.mark.parametrize("estimator, gradients",
zip(["GP", "RF", "ET", "GBRT", "DUMMY"],
[True, False, False, False, False]))
def test_has_gradients(estimator, gradients):
space = Space([(-2.0, 2.0)])
assert has_gradients(cook_estimator(estimator, space=space)) == gradients
@pytest.mark.fast_test
def test_categorical_gp_has_gradients():
space = Space([('a', 'b')])
assert not has_gradients(cook_estimator('GP', space=space))
@pytest.mark.fast_test
def test_normalize_dimensions_all_categorical():
dimensions = (['a', 'b', 'c'], ['1', '2', '3'])
space = normalize_dimensions(dimensions)
assert space.is_categorical
@pytest.mark.fast_test
@pytest.mark.parametrize("dimensions, normalizations",
[(((1, 3), (1., 3.)),
('normalize', 'normalize')
),
(((1, 3), ('a', 'b', 'c')),
('normalize', 'onehot')
),
])
def test_normalize_dimensions(dimensions, normalizations):
space = normalize_dimensions(dimensions)
for dimension, normalization in zip(space, normalizations):
assert dimension.transform_ == normalization
@pytest.mark.fast_test
@pytest.mark.parametrize("dimension, name",
[(Real(1, 2, name="learning rate"), "learning rate"),
(Integer(1, 100, name="no of trees"), "no of trees"),
(Categorical(["red, blue"], name="colors"), "colors")])
def test_normalize_dimensions_name(dimension, name):
space = normalize_dimensions([dimension])
assert space.dimensions[0].name == name
@pytest.mark.fast_test
def test_use_named_args():
"""
Test the function wrapper @use_named_args which is used
for wrapping an objective function with named args so it
can be called by the optimizers which only pass a single
list as the arg.
This test does not actually use the optimizers but merely
simulates how they would call the function.
"""
# Define the search-space dimensions. They must all have names!
dim1 = Real(name='foo', low=0.0, high=1.0)
dim2 = Real(name='bar', low=0.0, high=1.0)
dim3 = Real(name='baz', low=0.0, high=1.0)
# Gather the search-space dimensions in a list.
dimensions = [dim1, dim2, dim3]
# Parameters that will be passed to the objective function.
default_parameters = [0.5, 0.6, 0.8]
# Define the objective function with named arguments
# and use this function-decorator to specify the search-space dimensions.
@use_named_args(dimensions=dimensions)
def func(foo, bar, baz):
# Assert that all the named args are indeed correct.
assert foo == default_parameters[0]
assert bar == default_parameters[1]
assert baz == default_parameters[2]
# Return some objective value.
return foo ** 2 + bar ** 4 + baz ** 8
# Ensure the objective function can be called with a single
# argument named x.
res = func(x=default_parameters)
assert (isinstance(res, float))
# Ensure the objective function can be called with a single
# argument that is unnamed.
res = func(default_parameters)
assert (isinstance(res, float))
# Ensure the objective function can be called with a single
# argument that is a numpy array named x.
res = func(x=np.array(default_parameters))
assert (isinstance(res, float))
# Ensure the objective function can be called with a single
# argument that is an unnamed numpy array.
res = func(np.array(default_parameters))
assert (isinstance(res, float))
| bsd-3-clause | -9,219,509,595,222,137,000 | 32.312236 | 81 | 0.625586 | false |
ronniedada/tabula | setup.py | 1 | 1088 | from setuptools import setup, find_packages
version = '1.0.5'
setup(
name = 'tabula',
version = version,
description = "Ascii table",
url = "https://github.com/ronniedada/tabula",
long_description = open("README.md").read(),
classifiers = [
"Intended Audience :: Developers",
'Development Status :: 3 - Alpha',
'Environment :: Console',
"Operating System :: POSIX",
"License :: OSI Approved :: Apache Software License",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: User Interfaces',
'Topic :: Terminals'
],
keywords = ['couchbase', 'terminal', 'ascii', 'table', 'numpy', 'curses'],
author = "Ronnie Sun",
author_email = "[email protected]",
license="Apache Software License",
packages = find_packages(exclude=['ez_setup']),
include_package_data = True,
install_requires = [
'setuptools',
'numpy',
],
)
| apache-2.0 | -733,620,760,118,308,900 | 31.969697 | 78 | 0.589154 | false |
xstrengthofonex/code-live-tutorials | python_web_development/tcp_server/redis_clone/threaded_redis_clone.py | 1 | 1560 | import socket
import threading
local = threading.local()
def handle(conn):
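    # Handle one client connection. The protocol is line-oriented:
    #   SET <key> <value>  - store a value
    #   GET <key>          - reply with "<value>\r\n" ("None\r\n" if the key is missing)
    #   DEL <key>          - remove a key
    #   QUIT               - end the session
    # Note: local.data is thread-local, so every connection gets its own store.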
local.data = {}
while True:
        message = conn.recv(1024).decode()
        if not message:
            # An empty read means the client closed the connection.
            break
        fields = message.rstrip("\r\n").split(" ")
command = fields[0]
if command == "QUIT":
break
if len(fields) < 2:
continue
if command == "GET":
key = fields[1]
value = local.data.get(key)
conn.sendall("{}\r\n".format(value).encode())
elif command == "SET":
if len(fields) != 3:
conn.send("EXPECTED VALUE\r\n".encode())
continue
key = fields[1]
value = fields[2]
local.data[key] = value
elif command == "DEL":
key = fields[1]
            local.data.pop(key, None)  # ignore missing keys instead of raising KeyError
else:
conn.sendall("INVALID COMMAND {}\r\n".format(command).encode())
conn.close()
def run_server():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("", 8080))
sock.listen(1)
print("Running at {}".format(sock.getsockname()))
while True:
try:
conn, addr = sock.accept()
print("Connected to {}".format(addr))
threaded_handler = threading.Thread(target=handle, args=(conn,))
threaded_handler.start()
except KeyboardInterrupt:
print("Shutting down server")
break
sock.close()
if __name__ == "__main__":
run_server() | mit | -438,459,386,589,432,770 | 26.385965 | 76 | 0.524359 | false |
bbqsrc/kbdgen | pysrc/kbdgen/orderedyaml.py | 2 | 1796 | import io
from collections import OrderedDict
import yaml
import yaml.constructor
# Courtesy of https://gist.github.com/844388. Thanks!
class OrderedDictYAMLLoader(yaml.Loader):
"""A YAML loader that loads mappings into ordered dictionaries."""
def __init__(self, *args, **kwargs):
yaml.Loader.__init__(self, *args, **kwargs)
self.add_constructor("tag:yaml.org,2002:map", type(self).construct_yaml_map)
self.add_constructor("tag:yaml.org,2002:omap", type(self).construct_yaml_map)
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
else:
raise yaml.constructor.ConstructorError(
None,
None,
"expected a mapping node, but found %s" % node.id,
node.start_mark,
)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError(
"while constructing a mapping",
node.start_mark,
"found unacceptable key (%s)" % exc,
key_node.start_mark,
)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
def load(f):
return yaml.load(f, OrderedDictYAMLLoader)
def loads(string):
return yaml.load(io.StringIO(string), OrderedDictYAMLLoader)
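# Example: loads("b: 1\na: 2") returns OrderedDict([('b', 1), ('a', 2)]),
# preserving the key order of the YAML source.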
| apache-2.0 | -2,055,110,915,693,184,000 | 29.965517 | 85 | 0.586303 | false |
HPI-SWA-Lab/RSqueak | rsqueakvm/primitives/constants.py | 1 | 6591 | # ___________________________________________________________________________
# SmallInteger Primitives
ADD = 1
SUBTRACT = 2
LESSTHAN = 3
GREATERTHAN = 4
LESSOREQUAL = 5
GREATEROREQUAL = 6
EQUAL = 7
NOTEQUAL = 8
MULTIPLY = 9
DIVIDE = 10
MOD = 11
DIV = 12
QUO = 13
BIT_AND = 14
BIT_OR = 15
BIT_XOR = 16
BIT_SHIFT = 17
MAKE_POINT = 18
FAIL = 19
LARGE_OFFSET = 20
LARGE_REM = 20
LARGE_ADD = 21
LARGE_SUBTRACT = 22
LARGE_LESSTHAN = 23
LARGE_GREATERTHAN = 24
LARGE_LESSOREQUAL = 25
LARGE_GREATEROREQUAL = 26
LARGE_EQUAL = 27
LARGE_NOTEQUAL = 28
LARGE_MULTIPLY = 29
LARGE_DIVIDE = 30
LARGE_MOD = 31
LARGE_DIV = 32
LARGE_QUO = 33
LARGE_BIT_AND = 34
LARGE_BIT_OR = 35
LARGE_BIT_XOR = 36
LARGE_BIT_SHIFT = 37
# ___________________________________________________________________________
# Float Primitives
FLOAT_OFFSET = 40
SMALLINT_AS_FLOAT = 40
FLOAT_ADD = 41
FLOAT_SUBTRACT = 42
FLOAT_LESSTHAN = 43
FLOAT_GREATERTHAN = 44
FLOAT_LESSOREQUAL = 45
FLOAT_GREATEROREQUAL = 46
FLOAT_EQUAL = 47
FLOAT_NOTEQUAL = 48
FLOAT_MULTIPLY = 49
FLOAT_DIVIDE = 50
FLOAT_TRUNCATED = 51
# OPTIONAL: 52, 53
FLOAT_TIMES_TWO_POWER = 54
FLOAT_SQUARE_ROOT = 55
FLOAT_SIN = 56
FLOAT_ARCTAN = 57
FLOAT_LOG_N = 58
FLOAT_EXP = 59
# ___________________________________________________________________________
# Subscript and Stream Primitives
AT = 60
AT_PUT = 61
SIZE = 62
STRING_AT = 63
STRING_AT_PUT = 64
# ___________________________________________________________________________
# Stream Primitives
NEXT = 65
NEXT_PUT = 66
AT_END = 67
# ___________________________________________________________________________
# Storage Management Primitives
OBJECT_AT = 68
OBJECT_AT_PUT = 69
NEW = 70
NEW_WITH_ARG = 71
ARRAY_BECOME_ONE_WAY = 72 # Blue Book: primitiveBecome
INST_VAR_AT = 73
INST_VAR_AT_PUT = 74
AS_OOP = 75
STORE_STACKP = 76 # Blue Book: primitiveAsObject
SOME_INSTANCE = 77
NEXT_INSTANCE = 78
NEW_METHOD = 79
# ___________________________________________________________________________
# Control Primitives
BLOCK_COPY = 80
VALUE = 81
VALUE_WITH_ARGS = 82
PERFORM = 83
PERFORM_WITH_ARGS = 84
PERFORM_IN_SUPERCLASS = 100
SIGNAL = 85
WAIT = 86
RESUME = 87
SUSPEND = 88
FLUSH_CACHE = 89
# ___________________________________________________________________________
# I/O Primitives
MOUSE_POINT = 90
TEST_DISPLAY_DEPTH = 91
SET_DISPLAY_MODE = 92
INPUT_SEMAPHORE = 93
GET_NEXT_EVENT = 94
INPUT_WORD = 95
BITBLT_COPY_BITS = 96
SNAPSHOT = 97
STORE_IMAGE_SEGMENT = 98
LOAD_IMAGE_SEGMENT = 99
BE_CURSOR = 101
BE_DISPLAY = 102
SCAN_CHARACTERS = 103
OBSOLETE_INDEXED = 104 # also 96
REPLACE_FROM_TO = 105
SCREEN_SIZE = 106
MOUSE_BUTTONS = 107
KBD_NEXT = 108
KBD_PEEK = 109
# ___________________________________________________________________________
# Control Primitives
EQUIVALENT = 110
CLASS = 111
BYTES_LEFT = 112
QUIT = 113
EXIT_TO_DEBUGGER = 114
CHANGE_CLASS = 115 # Blue Book: primitiveOopsLeft
COMPILED_METHOD_FLUSH_CACHE = 116
EXTERNAL_CALL = 117
SYMBOL_FLUSH_CACHE = 119
# ___________________________________________________________________________
# Miscellaneous Primitives
CALLOUT_TO_FFI = 120
IMAGE_NAME = 121
NOOP = 122
VALUE_UNINTERRUPTABLY = 123
LOW_SPACE_SEMAPHORE = 124
SIGNAL_AT_BYTES_LEFT = 125
DEFER_UPDATES = 126
DRAW_RECTANGLE = 127
# ___________________________________________________________________________
# Squeak Miscellaneous Primitives
BECOME = 128
SPECIAL_OBJECTS_ARRAY = 129
FULL_GC = 130
INC_GC = 131
SET_INTERRUPT_KEY = 133
INTERRUPT_SEMAPHORE = 134
# ____________________________________________________________________________
# Time Primitives
MILLISECOND_CLOCK = 135
SIGNAL_AT_MILLISECONDS = 136
SECONDS_CLOCK = 137
# ____________________________________________________________________________
# Misc Primitives
SOME_OBJECT = 138
NEXT_OBJECT = 139
BEEP = 140
CLIPBOARD_TEXT = 141
VM_PATH = 142
SHORT_AT = 143
SHORT_AT_PUT = 144
FILL = 145
CLONE = 148
SYSTEM_ATTRIBUTE = 149
# ___________________________________________________________________________
# File primitives
# (XXX they are obsolete in Squeak and done with a plugin)
FILE_AT_END = 150
FILE_CLOSE = 151
FILE_GET_POSITION = 152
FILE_OPEN = 153
FILE_READ = 154
FILE_SET_POSITION = 155
FILE_DELETE = 156
FILE_SIZE = 157
FILE_WRITE = 158
FILE_RENAME = 159
DIRECTORY_CREATE = 160
DIRECTORY_DELIMITOR = 161
DIRECTORY_LOOKUP = 162
DIRECTORY_DELTE = 163
# ___________________________________________________________________________
# Misc primitives
YIELD = 167
INTEGER_AT = 165
INTEGER_AT_PUT = 166
CHARACTER_VALUE = 170
IMMEDIATE_IDENTITY_HASH = 171
SLOT_AT = 173
SLOT_AT_PUT = 174
CLASS_IDENTITY_HASH = 175
MAX_IDENTITY_HASH = 176
ALL_INSTANCES = 177
ALL_OBJECTS = 178
BYTE_SIZE_OF_INSTANCE = 181
EXIT_CRITICAL_SECTION = 185 # similar to SIGNAL, hence SIGNAL + 100
ENTER_CRITICAL_SECTION = 186 # similar to WAIT, hence WAIT + 100
TEST_AND_SET_OWNERSHIP_OF_CRITICAL_SECTION = 187
WITH_ARGS_EXECUTE_METHOD = 188
# ___________________________________________________________________________
# BlockClosure Primitives
CLOSURE_COPY_WITH_COPIED_VALUES = 200
CLOSURE_VALUE = 201
CLOSURE_VALUE_ = 202
CLOSURE_VALUE_VALUE = 203
CLOSURE_VALUE_VALUE_VALUE = 204
CLOSURE_VALUE_VALUE_VALUE_VALUE = 205
CLOSURE_VALUE_WITH_ARGS = 206 # valueWithArguments:
# ___________________________________________________________________________
# Override the default primitive to give latitude to the VM in context management.
CTXT_AT = 210
CTXT_AT_PUT = 211
CTXT_SIZE = 212
CLOSURE_VALUE_NO_CONTEXT_SWITCH = 221
CLOSURE_VALUE_NO_CONTEXT_SWITCH_ = 222
# ___________________________________________________________________________
# Drawing
IDLE_FOR_MICROSECONDS = 230
FORCE_DISPLAY_UPDATE = 231
SET_FULL_SCREEN = 233
# ____________________________________________________________________________
# Time Primitives
UTC_MICROSECOND_CLOCK = 240
LOCAL_MICROSECOND_CLOCK = 241
SIGNAL_AT_UTC_MICROSECONDS = 242
UPDATE_TIMEZONE = 243
# ___________________________________________________________________________
# VM implementor primitives
VM_CLEAR_PROFILE = 250
VM_DUMP_PROFILE = 251
VM_START_PROFILING = 252
VM_STOP_PROFILING = 253
VM_PARAMETERS = 254
META_PRIM_FAILED = 255 # Used to be INST_VARS_PUT_FROM_STACK. Never used except in Disney tests. Remove after 2.3 release.
# ___________________________________________________________________________
# Quick Push Const Primitives
PUSH_SELF = 256
PUSH_TRUE = 257
PUSH_FALSE = 258
PUSH_NIL = 259
PUSH_MINUS_ONE = 260
PUSH_ZERO = 261
PUSH_ONE = 262
PUSH_TWO = 263
# ___________________________________________________________________________
# VM primitives
VM_LOADED_MODULES = 573
| bsd-3-clause | 7,074,637,390,688,386,000 | 25.258964 | 124 | 0.551206 | false |
HybridF5/jacket | jacket/storage/backup/drivers/tsm.py | 1 | 20861 | # Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Backup driver for IBM Tivoli Storage Manager (TSM).
Implementation of a backup service that uses IBM Tivoli Storage Manager (TSM)
as the backend. The driver uses TSM command line dsmc utility to
run the backup and restore operations.
This version supports backup of block devices, e.g., FC, iSCSI, local, as well as
regular files.
A prerequisite for using the IBM TSM backup service is configuring the
Cinder host for using TSM.
"""
import json
import os
import stat
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from jacket.storage.backup import driver
from jacket.storage import exception
from jacket.storage.i18n import _LE, _
from jacket.storage import utils
LOG = logging.getLogger(__name__)
tsm_opts = [
cfg.StrOpt('backup_tsm_volume_prefix',
default='backup',
help='Volume prefix for the backup id when backing up to TSM'),
cfg.StrOpt('backup_tsm_password',
default='password',
help='TSM password for the running username',
secret=True),
cfg.BoolOpt('backup_tsm_compression',
default=True,
help='Enable or Disable compression for backups'),
]
CONF = cfg.CONF
CONF.register_opts(tsm_opts)
VALID_BACKUP_MODES = ['image', 'file']
def _get_backup_metadata(backup, operation):
"""Return metadata persisted with backup object."""
try:
svc_dict = json.loads(backup.service_metadata)
backup_path = svc_dict.get('backup_path')
backup_mode = svc_dict.get('backup_mode')
except TypeError:
# for backwards compatibility
vol_prefix = CONF.backup_tsm_volume_prefix
backup_id = backup['id']
backup_path = utils.make_dev_path('%s-%s' %
(vol_prefix, backup_id))
backup_mode = 'image'
if backup_mode not in VALID_BACKUP_MODES:
volume_id = backup['volume_id']
backup_id = backup['id']
err = (_('%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. '
'Backup object has unexpected mode. Image or file '
'backups supported, actual mode is %(vol_mode)s.')
% {'op': operation,
'bck_id': backup_id,
'vol_id': volume_id,
'vol_mode': backup_mode})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
return backup_path, backup_mode
def _image_mode(backup_mode):
"""True if backup is image type."""
return backup_mode == 'image'
def _make_link(volume_path, backup_path, vol_id):
"""Create a hard link for the volume block device.
The IBM TSM client performs an image backup on a block device.
The name of the block device is the backup prefix plus the backup id
:param volume_path: real device path name for volume
:param backup_path: path name TSM will use as volume to backup
:param vol_id: id of volume to backup (for reporting)
:raises: InvalidBackup
"""
try:
utils.execute('ln', volume_path, backup_path,
run_as_root=True,
check_exit_code=True)
except processutils.ProcessExecutionError as exc:
err = (_('backup: %(vol_id)s failed to create device hardlink '
'from %(vpath)s to %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': vol_id,
'vpath': volume_path,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
def _create_unique_device_link(backup_id, volume_path, volume_id, bckup_mode):
"""Create a consistent hardlink for the volume block device.
Create a consistent hardlink using the backup id so TSM
will be able to backup and restore to the same block device.
:param backup_id: the backup id
:param volume_path: real path of the backup/restore device
:param volume_id: Volume id for backup or as restore target
:param bckup_mode: TSM backup mode, either 'image' or 'file'
:raises: InvalidBackup
:returns: str -- hardlink path of the volume block device
"""
if _image_mode(bckup_mode):
hardlink_path = utils.make_dev_path('%s-%s' %
(CONF.backup_tsm_volume_prefix,
backup_id))
else:
dir, volname = os.path.split(volume_path)
hardlink_path = ('%s/%s-%s' %
(dir,
CONF.backup_tsm_volume_prefix,
backup_id))
_make_link(volume_path, hardlink_path, volume_id)
return hardlink_path
def _check_dsmc_output(output, check_attrs, exact_match=True):
"""Check dsmc command line utility output.
Parse the output of the dsmc command and make sure that a given
attribute is present, and that it has the proper value.
TSM attribute has the format of "text : value".
:param output: TSM output to parse
:param check_attrs: text to identify in the output
:param exact_match: if True, the check will pass only if the parsed
value is equal to the value specified in check_attrs. If false, the
check will pass if the parsed value is greater than or equal to the
value specified in check_attrs. This is needed because for file
backups, the parent directories may also be included the first a
volume is backed up.
:returns: bool -- indicate if requited output attribute found in output
"""
parsed_attrs = {}
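    # dsmc prints "attribute : value" lines, for example (illustrative):
    #   Total number of objects backed up:     1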
for line in output.split('\n'):
        # parse TSM output: look for "msg : value"
key, sep, val = line.partition(':')
if sep is not None and key is not None and len(val.strip()) > 0:
parsed_attrs[key] = val.strip()
for ckey, cval in check_attrs.items():
if ckey not in parsed_attrs:
return False
elif exact_match and parsed_attrs[ckey] != cval:
return False
elif not exact_match and int(parsed_attrs[ckey]) < int(cval):
return False
return True
def _get_volume_realpath(volume_file, volume_id):
"""Get the real path for the volume block device.
If the volume is not a block device or a regular file issue an
InvalidBackup exception.
:param volume_file: file object representing the volume
:param volume_id: Volume id for backup or as restore target
:raises: InvalidBackup
:returns: str -- real path of volume device
:returns: str -- backup mode to be used
"""
try:
# Get real path
volume_path = os.path.realpath(volume_file.name)
# Verify that path is a block device
volume_mode = os.stat(volume_path).st_mode
if stat.S_ISBLK(volume_mode):
backup_mode = 'image'
elif stat.S_ISREG(volume_mode):
backup_mode = 'file'
else:
err = (_('backup: %(vol_id)s failed. '
'%(path)s is unexpected file type. Block or regular '
'files supported, actual file mode is %(vol_mode)s.')
% {'vol_id': volume_id,
'path': volume_path,
'vol_mode': volume_mode})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except AttributeError:
err = (_('backup: %(vol_id)s failed. Cannot obtain real path '
'to volume at %(path)s.')
% {'vol_id': volume_id,
'path': volume_file})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except OSError:
err = (_('backup: %(vol_id)s failed. '
'%(path)s is not a file.')
% {'vol_id': volume_id,
'path': volume_path})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
return volume_path, backup_mode
def _cleanup_device_hardlink(hardlink_path, volume_path, volume_id):
"""Remove the hardlink for the volume block device.
:param hardlink_path: hardlink to the volume block device
:param volume_path: real path of the backup/restore device
:param volume_id: Volume id for backup or as restore target
"""
try:
utils.execute('rm',
'-f',
hardlink_path,
run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error(_LE('backup: %(vol_id)s failed to remove backup hardlink '
'from %(vpath)s to %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s.'),
{'vol_id': volume_id,
'vpath': volume_path,
'bpath': hardlink_path,
'out': exc.stdout,
'err': exc.stderr})
class TSMBackupDriver(driver.BackupDriver):
"""Provides backup, restore and delete of volumes backup for TSM."""
DRIVER_VERSION = '1.0.0'
def __init__(self, context, db_driver=None):
super(TSMBackupDriver, self).__init__(context, db_driver)
self.tsm_password = CONF.backup_tsm_password
self.volume_prefix = CONF.backup_tsm_volume_prefix
def _do_backup(self, backup_path, vol_id, backup_mode):
"""Perform the actual backup operation.
:param backup_path: volume path
:param vol_id: volume id
:param backup_mode: file mode of source volume; 'image' or 'file'
:raises: InvalidBackup
"""
backup_attrs = {'Total number of objects backed up': '1'}
compr_flag = 'yes' if CONF.backup_tsm_compression else 'no'
backup_cmd = ['dsmc', 'backup']
if _image_mode(backup_mode):
backup_cmd.append('image')
backup_cmd.extend(['-quiet',
'-compression=%s' % compr_flag,
'-password=%s' % self.tsm_password,
backup_path])
out, err = utils.execute(*backup_cmd,
run_as_root=True,
check_exit_code=False)
success = _check_dsmc_output(out, backup_attrs, exact_match=False)
if not success:
err = (_('backup: %(vol_id)s failed to obtain backup '
'success notification from server.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': vol_id,
'out': out,
'err': err})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
def _do_restore(self, backup_path, restore_path, vol_id, backup_mode):
"""Perform the actual restore operation.
:param backup_path: the path the backup was created from, this
identifies the backup to tsm
:param restore_path: volume path to restore into
:param vol_id: volume id
:param backup_mode: mode used to create the backup ('image' or 'file')
:raises: InvalidBackup
"""
restore_attrs = {'Total number of objects restored': '1'}
restore_cmd = ['dsmc', 'restore']
if _image_mode(backup_mode):
restore_cmd.append('image')
restore_cmd.append('-noprompt') # suppress prompt
else:
restore_cmd.append('-replace=yes') # suppress prompt
restore_cmd.extend(['-quiet',
'-password=%s' % self.tsm_password,
backup_path])
if restore_path != backup_path:
restore_cmd.append(restore_path)
out, err = utils.execute(*restore_cmd,
run_as_root=True,
check_exit_code=False)
success = _check_dsmc_output(out, restore_attrs)
if not success:
err = (_('restore: %(vol_id)s failed.\n'
'stdout: %(out)s\n stderr: %(err)s.')
% {'vol_id': vol_id,
'out': out,
'err': err})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
def backup(self, backup, volume_file, backup_metadata=False):
"""Backup the given volume to TSM.
TSM performs a backup of a volume. The volume_file is used
to determine the path of the block device that TSM will back-up.
:param backup: backup information for volume
:param volume_file: file object representing the volume
:param backup_metadata: whether or not to backup volume metadata
:raises InvalidBackup
"""
# TODO(dosaboy): this needs implementing (see backup.drivers.ceph for
# an example)
if backup_metadata:
msg = _("Volume metadata backup requested but this driver does "
"not yet support this feature.")
raise exception.InvalidBackup(reason=msg)
volume_path, backup_mode = _get_volume_realpath(volume_file,
backup.volume_id)
LOG.debug('Starting backup of volume: %(volume_id)s to TSM,'
' volume path: %(volume_path)s, mode: %(mode)s.',
{'volume_id': backup.volume_id,
'volume_path': volume_path,
'mode': backup_mode})
backup_path = _create_unique_device_link(backup.id,
volume_path,
backup.volume_id,
backup_mode)
service_metadata = {'backup_mode': backup_mode,
'backup_path': backup_path}
backup.service_metadata = json.dumps(service_metadata)
backup.save()
try:
self._do_backup(backup_path, backup.volume_id, backup_mode)
except processutils.ProcessExecutionError as exc:
err = (_('backup: %(vol_id)s failed to run dsmc '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': backup.volume_id,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except exception.Error as exc:
err = (_('backup: %(vol_id)s failed to run dsmc '
'due to invalid arguments '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': backup.volume_id,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
finally:
_cleanup_device_hardlink(backup_path, volume_path,
backup.volume_id)
LOG.debug('Backup %s finished.', backup.id)
def restore(self, backup, volume_id, volume_file):
"""Restore the given volume backup from TSM server.
:param backup: backup information for volume
:param volume_id: volume id
:param volume_file: file object representing the volume
:raises: InvalidBackup
"""
# backup_path is the path that was originally backed up.
backup_path, backup_mode = _get_backup_metadata(backup, 'restore')
LOG.debug('Starting restore of backup from TSM '
'to volume %(volume_id)s, '
'backup: %(backup_id)s, '
'mode: %(mode)s.',
{'volume_id': volume_id,
'backup_id': backup.id,
'mode': backup_mode})
# volume_path is the path to restore into. This may
# be different than the original volume.
volume_path, unused = _get_volume_realpath(volume_file,
volume_id)
restore_path = _create_unique_device_link(backup.id,
volume_path,
volume_id,
backup_mode)
try:
self._do_restore(backup_path, restore_path, volume_id, backup_mode)
except processutils.ProcessExecutionError as exc:
err = (_('restore: %(vol_id)s failed to run dsmc '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': restore_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except exception.Error as exc:
err = (_('restore: %(vol_id)s failed to run dsmc '
'due to invalid arguments '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': restore_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
finally:
_cleanup_device_hardlink(restore_path, volume_path, volume_id)
LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.',
{'backup_id': backup.id,
'volume_id': volume_id})
def delete(self, backup):
"""Delete the given backup from TSM server.
:param backup: backup information for volume
:raises: InvalidBackup
"""
delete_attrs = {'Total number of objects deleted': '1'}
delete_path, backup_mode = _get_backup_metadata(backup, 'restore')
LOG.debug('Delete started for backup: %(backup)s, mode: %(mode)s.',
{'backup': backup.id,
'mode': backup_mode})
try:
out, err = utils.execute('dsmc',
'delete',
'backup',
'-quiet',
'-noprompt',
'-objtype=%s' % backup_mode,
'-password=%s' % self.tsm_password,
delete_path,
run_as_root=True,
check_exit_code=False)
except processutils.ProcessExecutionError as exc:
err = (_('delete: %(vol_id)s failed to run dsmc with '
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': backup.volume_id,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except exception.Error as exc:
err = (_('delete: %(vol_id)s failed to run dsmc '
'due to invalid arguments with '
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': backup.volume_id,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
success = _check_dsmc_output(out, delete_attrs)
if not success:
# log error if tsm cannot delete the backup object
# but do not raise exception so that storage backup
# object can be removed.
LOG.error(_LE('delete: %(vol_id)s failed with '
'stdout: %(out)s\n stderr: %(err)s'),
{'vol_id': backup.volume_id,
'out': out,
'err': err})
LOG.debug('Delete %s finished.', backup['id'])
def get_backup_driver(context):
return TSMBackupDriver(context)
| apache-2.0 | 7,567,753,356,534,532,000 | 38.065543 | 79 | 0.540386 | false |
d3m3vilurr/libfeedly | tests/util_test.py | 1 | 1938 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
import libfeedly
import libfeedly.utils as utils
def test_user_id():
assert utils.user_id('00000000-0000-0000-0000-000000000000') \
== 'user/00000000-0000-0000-0000-000000000000'
def test_feed_id():
assert utils.feed_id('http://some/rss') == 'feed/http://some/rss'
assert utils.feed_id('http://some/rss', escape=True) \
== 'feed%2Fhttp%3A%2F%2Fsome%2Frss'
def test_category_id():
assert utils.category_id('user/abc', 'a') == 'user/abc/category/a'
assert utils.category_id('user/abc', '가나다') \
== 'user/abc/category/가나다'
assert utils.category_id('user/abc', '가나다', escape=True) \
== 'user%2Fabc%2Fcategory%2F%EA%B0%80%EB%82%98%EB%8B%A4'
assert utils.category_id('user/abc', b'abcd', escape=True) \
== 'user%2Fabc%2Fcategory%2Fabcd'
def test_tag_id():
assert utils.tag_id('user/abc', 'a') == 'user/abc/tag/a'
assert utils.tag_id('user/abc', '가나다') \
== 'user/abc/tag/가나다'
assert utils.tag_id('user/abc', '가나다', escape=True) \
== 'user%2Fabc%2Ftag%2F%EA%B0%80%EB%82%98%EB%8B%A4'
assert utils.tag_id('user/abc', b'abcd', escape=True) \
== 'user%2Fabc%2Ftag%2Fabcd'
def test_parse_oauth_code():
assert utils.parse_oauth_code('http://some/?code=abcde&scope=') == 'abcde'
assert utils.parse_oauth_code('http://some/?code=abc%20de&scope=') \
== 'abc%20de'
assert utils.parse_oauth_code('http://some/?code=abc+de&scope=') \
== 'abc+de'
assert utils.parse_oauth_code('http://some/?scope=&code=abcde') \
== 'abcde'
assert utils.parse_oauth_code('http://some/?code=abcde') \
== 'abcde'
assert utils.parse_oauth_code('http://some/?code=abcde&s1=1&s2=2') \
== 'abcde'
assert not utils.parse_oauth_code('http://some/')
| mit | -4,629,566,950,636,746,000 | 40.347826 | 78 | 0.60673 | false |
micaiahparker/dicebot | cog/tic.py | 1 | 4405 | from discord.ext.commands import command, group
from .cog import Cog
from random import choice
def get_column(i, grid):
return [row[i] for row in grid]
def check_row(row):
return row == "XXX" or row == "OOO"
def get_diags(grid):
return ("".join(str(grid[x][y]) for x, y in [(0, 0), (1, 1),(2, 2)]),
"".join(str(grid[x][y]) for x, y in [(0, 2), (1, 1),(2, 0)]))
class Board:
def __init__(self):
self.grid = [
[' ' for _ in range(3)] for _ in range(3)
]
self.turn = None
def move(self, player:int, x:int, y:int):
if self.grid[x][y] != ' ':
raise Exception("taken")
else:
self.grid[x][y] = ['X', 'O'][player]
def is_draw(self):
for row in self.grid:
for col in row:
if col == ' ':
return False
return True
def is_won(self):
for row in self.grid:
if check_row("".join(str(x) for x in row)):
return True
        for i in range(3):
            # get_column returns the whole column, so join its three cells and check once.
            col = get_column(i, self.grid)
            if check_row("".join(str(x) for x in col)):
                return True
for row in get_diags(self.grid):
if check_row("".join(str(x) for x in row)):
return True
return False
def convert(self, s):
if s == " ":
return ":eggplant:"
if s == "X":
return ":x:"
if s == "O":
return ":o:"
return s
def __str__(self):
return "\n".join(''.join(self.convert(col) for col in row) for row in self.grid)
class TicTacToe(Cog):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.players = []
self.i_turn = None
self.turn = 0
self.board = Board()
@group(pass_context=True)
async def tic(self, ctx):
"""plays tic tac toe, neat huh"""
if not ctx.invoked_subcommand:
pass
@tic.command(aliases=['board'])
async def show(self):
await self.bot.say(self.board)
@tic.command(pass_context=True)
async def start(self, ctx):
if not self.players:
self.players.append(ctx.message.author)
await self.bot.reply('Players: {}'.format(', '.join(str(p) for p in self.players)))
else:
await self.bot.reply('Game already started')
@tic.command(pass_context=True)
async def accept(self, ctx):
if len(self.players) == 1:
self.players.append(ctx.message.author)
await self.bot.reply('Players: {}'.format(', '.join(str(p) for p in self.players)))
self.i_turn = choice([0, 1])
self.turn = self.players[self.i_turn]
await self.say_turn()
else:
await self.bot.reply('Either game not started or max players')
@tic.command(pass_context=True)
async def move(self, ctx, x:int, y:int):
if not ctx.message.author == self.turn:
await self.bot.reply('Not ur turn')
return None
try:
self.board.move(self.i_turn, x, y)
if self.board.is_draw():
await self.bot.say('Draw!')
self.end_game()
elif self.board.is_won():
await self.bot.say(self.board)
await self.bot.say("{} won!".format(self.turn))
self.end_game()
else:
await self.change_turn()
except Exception as e:
await self.bot.reply('try again')
@tic.command(aliases=['players'])
async def get_players(self):
await self.bot.say(', '.join(str(p) for p in self.players))
@tic.command(aliases=['turn'])
async def get_turn(self):
await self.say_turn()
async def say_turn(self):
if self.turn:
await self.bot.say("It is {}'s turn".format(self.turn.mention))
else:
await self.bot.say("No current game")
async def change_turn(self):
self.i_turn = (self.i_turn + 1) % 2
self.turn = self.players[self.i_turn]
await self.say_turn()
await self.bot.say(self.board)
def end_game(self):
self.board = Board()
self.players = []
self.turn = None
self.i_turn = 0
def setup(bot):
bot.add_cog(TicTacToe(bot))
| mit | 243,149,239,420,478,900 | 28.965986 | 95 | 0.516005 | false |
dtroyer/cliff | cliff/hooks.py | 1 | 1974 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class CommandHook(object):
"""Base class for command hooks.
    :param command: Command instance being invoked
    :paramtype command: cliff.command.Command
"""
def __init__(self, command):
self.cmd = command
@abc.abstractmethod
def get_parser(self, parser):
"""Return an :class:`argparse.ArgumentParser`.
:param parser: An existing ArgumentParser instance to be modified.
:paramtype parser: ArgumentParser
:returns: ArgumentParser
"""
return parser
@abc.abstractmethod
def get_epilog(self):
"Return text to add to the command help epilog."
return ''
@abc.abstractmethod
def before(self, parsed_args):
"""Called before the command's take_action() method.
:param parsed_args: The arguments to the command.
:paramtype parsed_args: argparse.Namespace
:returns: argparse.Namespace
"""
return parsed_args
@abc.abstractmethod
def after(self, parsed_args, return_code):
"""Called after the command's take_action() method.
:param parsed_args: The arguments to the command.
:paramtype parsed_args: argparse.Namespace
:param return_code: The value returned from take_action().
:paramtype return_code: int
:returns: int
"""
return return_code
| apache-2.0 | 8,834,267,734,440,203,000 | 29.369231 | 76 | 0.668186 | false |
summanlp/evaluation | rouge_calculator.py | 1 | 3150 | import os
import os.path
from tempfile import mkdtemp
from shutil import rmtree
from evaluation_constants import MODEL_SUMMARIES_PATTERN, SYSTEM_SUMMARIES_PATTERN
from pyrouge import Rouge155
""" Class that runs ROUGE to compare the output of a custom summarization tool
comparing it to a 'gold standard' reference summary.
"""
ROUGE_PATH = os.path.join(os.getcwd(), 'ROUGE-RELEASE-1.5.5')
ROUGE_DATA_PATH = os.path.join(ROUGE_PATH, 'data')
SYSTEM_DIR = "system"
MODEL_DIR = "model"
CONFIG_FILENAME = "config.xml"
# Rouge options as used in the DUC2007 competition:
# http://www-nlpir.nist.gov/projects/duc/duc2007/tasks.html#main
ROUGE_OPTIONS = [
'-e', ROUGE_DATA_PATH, # Specify ROUGE_EVAL_HOME directory where the ROUGE data files can be found.
'-n', '2', # Compute ROUGE-1 and ROUGE-2.
'-x', # Do not calculate ROUGE-L.
'-m', # Apply Porter stemmer on both models and peers.
    '-2', '4', # Compute skip bigram (ROUGE-S) co-occurrence with a maximum skip distance of 4.
'-u', # Include unigram in Skip Bigram (ROUGE-S).
'-c', '95', # Specify CF\% (0 <= CF <= 100) confidence interval to compute.
    '-r', '1000', # Specify the number of sampling points in bootstrap resampling (default is 1000).
'-f', 'A', # Scores are averaged over multiple models.
'-p', '0.5', # Compute F-measure with alpha = 0.5.
'-t', '0', # Use model unit as the counting unit.
'-a' # Evaluate all systems.
]
def create_temporary_directories():
tempdir = mkdtemp()
# Creates the temp directories to hold the rouge files.
new_system_dir = os.path.join(tempdir, SYSTEM_DIR)
os.mkdir(new_system_dir)
new_model_dir = os.path.join(tempdir, MODEL_DIR)
os.mkdir(new_model_dir)
return tempdir
def evaluate_summary(model_directory, system_directory):
tempdir = create_temporary_directories()
rouge_instance = Rouge155(ROUGE_PATH, verbose=False, rouge_args=' '.join(ROUGE_OPTIONS))
# Converts the gold references files to rouge format.
model_input_dir = model_directory
model_output_dir = os.path.join(tempdir, MODEL_DIR)
rouge_instance.convert_summaries_to_rouge_format(model_input_dir, model_output_dir)
# Converts the summary file to rouge format.
system_output_dir = os.path.join(tempdir, SYSTEM_DIR)
rouge_instance.convert_summaries_to_rouge_format(system_directory, system_output_dir)
# Writes the configuration file.
config_filename = os.path.join(tempdir, CONFIG_FILENAME)
rouge_instance.write_config_static(system_output_dir, SYSTEM_SUMMARIES_PATTERN,
model_output_dir, MODEL_SUMMARIES_PATTERN,
config_filename, 1)
# Runs ROUGE comparing the gold reference summaries with the recently generated.
output = rouge_instance.evaluate_static(ROUGE_PATH, config_filename, ROUGE_OPTIONS)
# Removes the temporal directories.
rmtree(tempdir)
return rouge_instance.output_to_dict(output)
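# Example usage (hypothetical paths):
#   scores = evaluate_summary('/path/to/reference_summaries', '/path/to/generated_summaries')
#   print(scores['rouge_2_f_score'])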
| mit | 5,012,476,529,996,848,000 | 41 | 109 | 0.658095 | false |
yipenggao/moose | python/TestHarness/schedulers/Job.py | 1 | 4797 | import re
from timeit import default_timer as clock
class Timer(object):
"""
A helper class for testers to track the time it takes to run.
Every call to the start method must be followed by a call to stop.
"""
def __init__(self):
self.starts = []
self.ends = []
def start(self):
""" starts the timer clock """
self.starts.append(clock())
def stop(self):
""" stop/pauses the timer clock """
self.ends.append(clock())
def cumulativeDur(self):
""" returns the total/cumulative time taken by the timer """
diffs = [end - start for start, end in zip(self.starts, self.ends)]
return sum(diffs)
def avgerageDur(self):
return self.cumulativeDur() / len(self.starts)
def nRuns(self):
return len(self.starts)
def reset(self):
self.starts = []
self.ends = []
class Job(object):
"""
The Job class is a simple container for the tester and its associated output file object, the DAG,
the process object, the exit codes, and the start and end times.
"""
def __init__(self, tester, tester_dag, options):
self.options = options
self.__tester = tester
self.timer = Timer()
self.__dag = tester_dag
self.__dag_clone = None
self.__outfile = None
self.__start_time = clock()
self.__end_time = None
self.__std_out = ''
self.report_timer = None
def getTester(self):
""" Return the tester object """
return self.__tester
def getDAG(self):
""" Return the DAG object """
return self.__dag
def getOriginalDAG(self):
"""
        Retrieve the DAG object in the state it was in when setOriginalDAG was called, or its
        current state if setOriginalDAG was never called.
"""
return self.setOriginalDAG()
def setOriginalDAG(self):
"""
        Create a soft clone of the working DAG in whatever state it is currently in. This method
        should only be called once, and only after the working DAG is properly set up.
This is to protect the DAG from further tampering.
"""
if self.__dag_clone == None:
self.__dag_clone = self.__dag.clone()
return self.__dag_clone
def getTestName(self):
""" Wrapper method to return the testers test name """
return self.__tester.getTestName()
def run(self):
"""
A blocking method to handle the exit status of the process object while keeping track of the
time the process was active. When the process exits, read the output and close the file.
"""
self.__tester.prepare(self.options)
if self.options.dry_run or not self.__tester.shouldExecute():
self.__tester.setStatus(self.__tester.getSuccessMessage(), self.__tester.bucket_success)
return
self.__start_time = clock()
self.timer.reset()
self.__tester.run(self.timer, self.options)
self.__start_time = self.timer.starts[0]
self.__end_time = self.timer.ends[-1]
self.__std_out = self.__tester.std_out
def killProcess(self):
""" Kill remaining process that may be running """
self.__tester.killCommand()
def getStartTime(self):
""" Return the time the process started """
return self.__start_time
def getEndTime(self):
""" Return the time the process exited """
return self.__end_time
def getOutput(self):
""" Return the contents of output """
return self.__std_out
def setOutput(self, output):
""" Method to allow testers to overwrite the output if certain conditions are met """
if self.__tester.outfile is not None and not self.__tester.outfile.closed:
return
self.__std_out = output
def getActiveTime(self):
""" Return active time """
m = re.search(r"Active time=(\S+)", self.__std_out)
if m != None:
return m.group(1)
def getSolveTime(self):
""" Return solve time """
m = re.search(r"solve().*", self.__std_out)
if m != None:
return m.group().split()[5]
def getTiming(self):
""" Return active time if available, if not return a comparison of start and end time """
if self.getActiveTime():
return self.getActiveTime()
elif self.getEndTime() and self.getStartTime():
return self.timer.cumulativeDur()
elif self.getStartTime() and self.__tester.isPending():
# If the test is still running, return current run time instead
return max(0.0, clock() - self.getStartTime())
else:
return 0.0
| lgpl-2.1 | -4,338,443,902,956,988,400 | 32.3125 | 102 | 0.591828 | false |
danhuss/faker | faker/providers/ssn/es_MX/__init__.py | 1 | 5840 | """
SSN provider for es_MX.
This module adds a provider for Mexican SSNs, along with the Unique Population
Registry Code (CURP) and Federal Taxpayer Registry ID (RFC).
"""
import random
import string
from .. import Provider as BaseProvider
ALPHABET = string.ascii_uppercase
ALPHANUMERIC = string.digits + ALPHABET
VOWELS = "AEIOU"
CONSONANTS = [
letter
for letter in ALPHABET
if letter not in VOWELS
]
# https://es.wikipedia.org/wiki/Plantilla:Abreviaciones_de_los_estados_de_M%C3%A9xico
STATES_RENAPO = [
"AS",
"BC",
"BS",
"CC",
"CS",
"CH",
"DF",
"CL",
"CM",
"DG",
"GT",
"GR",
"HG",
"JC",
"MC",
"MN",
"MS",
"NT",
"NL",
"OC",
"PL",
"QO",
"QR",
"SP",
"SL",
"SR",
"TC",
"TS",
"TL",
"VZ",
"YN",
"ZS",
"NE", # Foreign Born
]
FORBIDDEN_WORDS = {
"BUEI": "BUEX",
"BUEY": "BUEX",
"CACA": "CACX",
"CACO": "CACX",
"CAGA": "CAGX",
"CAGO": "CAGX",
"CAKA": "CAKX",
"CAKO": "CAKX",
"COGE": "COGX",
"COJA": "COJX",
"COJE": "COJX",
"COJI": "COJX",
"COJO": "COJX",
"CULO": "CULX",
"FETO": "FETX",
"GUEY": "GUEX",
"JOTO": "JOTX",
"KACA": "KACX",
"KACO": "KACX",
"KAGA": "KAGX",
"KAGO": "KAGX",
"KOGE": "KOGX",
"KOJO": "KOJX",
"KAKA": "KAKX",
"KULO": "KULX",
"MAME": "MAMX",
"MAMO": "MAMX",
"MEAR": "MEAX",
"MEAS": "MEAX",
"MEON": "MEOX",
"MION": "MIOX",
"MOCO": "MOCX",
"MULA": "MULX",
"PEDA": "PEDX",
"PEDO": "PEDX",
"PENE": "PENX",
"PUTA": "PUTX",
"PUTO": "PUTX",
"QULO": "QULX",
"RATA": "RATX",
"RUIN": "RUIN",
}
CURP_CHARACTERS = "0123456789ABCDEFGHIJKLMNÑOPQRSTUVWXYZ"
def _reduce_digits(number):
"""
    Repeatedly sum the digits of a number until a single digit remains
    (the digital root).
    Example:
        658 => 6 + 5 + 8 = 19 => 1 + 9 = 10 => 1 + 0 = 1
"""
if number == 0:
return 0
if number % 9 == 0:
return 9
return number % 9
def ssn_checksum(digits):
"""
    Calculate the checksum for the Mexican SSN (IMSS).
"""
return -sum(
_reduce_digits(n * (i % 2 + 1))
for i, n in enumerate(digits)
) % 10
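# Illustrative helper (not part of the upstream provider): shows how ssn() builds
# the final IMSS number by appending the check digit to a hypothetical 10-digit base.
def _ssn_checksum_example():
    base = "1785089123"  # hypothetical base: office, start year, birth year, serial
    check = ssn_checksum(int(ch) for ch in base)
    return base + str(check)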
def curp_checksum(characters):
"""
    Calculate the checksum for the Mexican CURP.
"""
start = 18
return -sum(
(start - i) * CURP_CHARACTERS.index(n)
for i, n in enumerate(characters)
) % 10
class Provider(BaseProvider):
"""
A Faker provider for the Mexican SSN, RFC and CURP
"""
ssn_formats = ("###########",)
def ssn(self):
"""
Mexican Social Security Number, as given by IMSS.
:return: a random Mexican SSN
"""
office = self.random_int(min=1, max=99)
birth_year = self.random_int(min=0, max=99)
start_year = self.random_int(min=0, max=99)
serial = self.random_int(min=1, max=9999)
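        # 10-digit base: office (2), start_year (2), birth_year (2) and serial (4);
        # the check digit computed below becomes the 11th digit.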
num = "{:02d}{:02d}{:02d}{:04d}".format(
office,
start_year,
birth_year,
serial,
)
check = ssn_checksum(map(int, num))
num += str(check)
return num
def curp(self):
"""
See https://es.wikipedia.org/wiki/Clave_%C3%9Anica_de_Registro_de_Poblaci%C3%B3n.
:return: a random Mexican CURP (Unique Population Registry Code)
"""
birthday = self.generator.date_of_birth()
first_surname = random.choice(ALPHABET) + random.choice(VOWELS)
second_surname = random.choice(ALPHABET)
given_name = random.choice(ALPHABET)
name_initials = first_surname + second_surname + given_name
birth_date = birthday.strftime("%y%m%d")
gender = random.choice("HM")
state = random.choice(STATES_RENAPO)
first_surname_inside = random.choice(CONSONANTS)
second_surname_inside = random.choice(CONSONANTS)
given_name_inside = random.choice(ALPHABET)
# This character is assigned to avoid duplicity
# It's normally '0' for those born < 2000
# and 'A' for those born >= 2000
assigned_character = "0" if birthday.year < 2000 else "A"
name_initials = FORBIDDEN_WORDS.get(name_initials, name_initials)
random_curp = (
name_initials +
birth_date +
gender +
state +
first_surname_inside +
second_surname_inside +
given_name_inside +
assigned_character
)
random_curp += str(curp_checksum(random_curp))
return random_curp
def rfc(self, natural=True):
"""
See https://es.wikipedia.org/wiki/Registro_Federal_de_Contribuyentes
:param natural: Whether to return the RFC of a natural person.
Otherwise return the RFC of a legal person.
:type natural: bool
:return: a random Mexican RFC
"""
birthday = self.generator.date_of_birth()
if natural:
first_surname = random.choice(ALPHABET) + random.choice(VOWELS)
second_surname = random.choice(ALPHABET)
given_name = random.choice(ALPHABET)
name_initials = first_surname + second_surname + given_name
else:
name_initials = (
self.random_uppercase_letter() +
self.random_uppercase_letter() +
self.random_uppercase_letter()
)
birth_date = birthday.strftime("%y%m%d")
disambiguation_code = (
random.choice(ALPHANUMERIC) +
random.choice(ALPHANUMERIC) +
random.choice(ALPHANUMERIC)
)
random_rfc = (
name_initials +
birth_date +
disambiguation_code
)
return random_rfc
| mit | 8,095,526,841,442,611,000 | 22.449799 | 89 | 0.533996 | false |
vjorlikowski/plexus | plexus/ofctl.py | 1 | 20498 | # Copyright (c) 2015 Duke University.
# This software is distributed under the terms of the MIT License,
# the text of which is included in this distribution within the file
# named LICENSE.
#
# Portions of this software are derived from the "rest_router" controller
# application included with Ryu (http://osrg.github.io/ryu/), which is:
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Modifications and additions were made to the original content by the
# following authors:
# Author: Victor J. Orlikowski <[email protected]>
from ryu.exception import OFPUnknownVersion
from plexus import *
from plexus.util import *
class OfCtl(object):
_OF_VERSIONS = {}
@staticmethod
def register_of_version(version):
def _register_of_version(cls):
OfCtl._OF_VERSIONS.setdefault(version, cls)
return cls
return _register_of_version
@staticmethod
def factory(dp, logger):
of_version = dp.ofproto.OFP_VERSION
if of_version in OfCtl._OF_VERSIONS:
ofctl = OfCtl._OF_VERSIONS[of_version](dp, logger)
else:
raise OFPUnknownVersion(version=of_version)
return ofctl
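    # Example: OfCtl.factory(dp, logger) yields an OfCtl_v1_0, OfCtl_v1_2 or
    # OfCtl_v1_3 instance depending on dp.ofproto.OFP_VERSION, as registered by
    # the @OfCtl.register_of_version decorators on the subclasses below.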
def __init__(self, dp, logger):
super(OfCtl, self).__init__()
self.dp = dp
self.sw_id = {'sw_id': dpid_lib.dpid_to_str(dp.id)}
self.logger = logger
def set_sw_config_for_ttl(self):
# OpenFlow v1_2/1_3.
pass
def clear_flows(self):
# Abstract method
raise NotImplementedError()
def set_flow(self, cookie, priority,
in_port=None,
dl_type=0, dl_src=0, dl_dst=0, dl_vlan=0,
nw_src=0, src_mask=32, nw_dst=0, dst_mask=32,
nw_proto=0, idle_timeout=0, hard_timeout=0,
flags=0, actions=None):
# Abstract method
raise NotImplementedError()
def send_arp(self, arp_opcode, vlan_id, src_mac, dst_mac,
src_ip, dst_ip, arp_target_mac, in_port, output):
# Generate ARP packet
if vlan_id != VLANID_NONE:
ether_proto = ether.ETH_TYPE_8021Q
pcp = 0
cfi = 0
vlan_ether = ether.ETH_TYPE_ARP
v = vlan.vlan(pcp, cfi, vlan_id, vlan_ether)
else:
ether_proto = ether.ETH_TYPE_ARP
hwtype = 1
arp_proto = ether.ETH_TYPE_IP
hlen = 6
plen = 4
pkt = packet.Packet()
e = ethernet.ethernet(dst_mac, src_mac, ether_proto)
a = arp.arp(hwtype, arp_proto, hlen, plen, arp_opcode,
src_mac, src_ip, arp_target_mac, dst_ip)
pkt.add_protocol(e)
if vlan_id != VLANID_NONE:
pkt.add_protocol(v)
pkt.add_protocol(a)
pkt.serialize()
# Send packet out
self.send_packet_out(in_port, output, pkt.data, data_str=str(pkt))
def send_icmp(self, in_port, protocol_list, vlan_id, icmp_type,
icmp_code, icmp_data=None, msg_data=None, src_ip=None, out_port=None):
# Generate ICMP reply packet
csum = 0
offset = ethernet.ethernet._MIN_LEN
if vlan_id != VLANID_NONE:
ether_proto = ether.ETH_TYPE_8021Q
pcp = 0
cfi = 0
vlan_ether = ether.ETH_TYPE_IP
v = vlan.vlan(pcp, cfi, vlan_id, vlan_ether)
offset += vlan.vlan._MIN_LEN
else:
ether_proto = ether.ETH_TYPE_IP
eth = protocol_list[ETHERNET]
e = ethernet.ethernet(eth.src, eth.dst, ether_proto)
ip = protocol_list[IPV4]
if icmp_data is None and msg_data is not None:
# RFC 4884 says that we should send "at least 128 octets"
# if we are using the ICMP Extension Structure.
# We're not using the extension structure, but let's send
# up to 128 bytes of the original msg_data.
#
# RFC 4884 also states that the length field is interpreted in
# 32 bit units, so the length calculated in bytes needs to first
# be divided by 4, then increased by 1 if the modulus is non-zero.
#
# Finally, RFC 4884 says, if we're specifying the length, we MUST
# zero pad to the next 32 bit boundary.
end_of_data = offset + len(ip) + 128
ip_datagram = bytearray()
ip_datagram += msg_data[offset:end_of_data]
data_len = int(len(ip_datagram) / 4)
length_modulus = int(len(ip_datagram) % 4)
if length_modulus:
data_len += 1
ip_datagram += bytearray([0]*(4 - length_modulus))
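            # e.g. a 130-byte captured datagram gives data_len = 33 and 2 bytes of
            # zero padding, so the payload ends on a 32-bit (132-byte) boundary.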
if icmp_type == icmp.ICMP_DEST_UNREACH:
icmp_data = icmp.dest_unreach(data_len=data_len,
data=ip_datagram)
elif icmp_type == icmp.ICMP_TIME_EXCEEDED:
icmp_data = icmp.TimeExceeded(data_len=data_len,
data=ip_datagram)
ic = icmp.icmp(icmp_type, icmp_code, csum, data=icmp_data)
if src_ip is None:
src_ip = ip.dst
ip_total_length = ip.header_length * 4 + ic._MIN_LEN
if ic.data is not None:
ip_total_length += ic.data._MIN_LEN
if ic.data.data is not None:
ip_total_length += len(ic.data.data)
i = ipv4.ipv4(ip.version, ip.header_length, ip.tos,
ip_total_length, ip.identification, ip.flags,
ip.offset, DEFAULT_TTL, inet.IPPROTO_ICMP, csum,
src_ip, ip.src)
pkt = packet.Packet()
pkt.add_protocol(e)
if vlan_id != VLANID_NONE:
pkt.add_protocol(v)
pkt.add_protocol(i)
pkt.add_protocol(ic)
pkt.serialize()
if out_port is None:
out_port = self.dp.ofproto.OFPP_IN_PORT
# Send packet out
self.send_packet_out(in_port, out_port,
pkt.data, data_str=str(pkt))
def send_packet_out(self, in_port, output, data, data_str=None):
actions = [self.dp.ofproto_parser.OFPActionOutput(output, 0)]
self.dp.send_packet_out(buffer_id=UINT32_MAX, in_port=in_port,
actions=actions, data=data)
#TODO: Packet library convert to string
#if data_str is None:
# data_str = str(packet.Packet(data))
#self.logger.debug('Packet out = %s', data_str)
def set_packetin_flow(self, cookie, priority, dl_type=0, dl_dst=0,
dl_vlan=0, dst_ip=0, dst_mask=32, src_ip=0, src_mask=32, nw_proto=0):
actions = [self.dp.ofproto_parser.OFPActionOutput(
self.dp.ofproto.OFPP_CONTROLLER,
self.dp.ofproto.OFPCML_NO_BUFFER)]
self.set_flow(cookie, priority, dl_type=dl_type, dl_dst=dl_dst,
dl_vlan=dl_vlan, nw_dst=dst_ip, dst_mask=dst_mask,
nw_src=src_ip, src_mask=src_mask, nw_proto=nw_proto, actions=actions)
def send_stats_request(self, stats, waiters):
self.dp.set_xid(stats)
waiters_per_dp = waiters.setdefault(self.dp.id, {})
event = hub.Event()
msgs = []
waiters_per_dp[stats.xid] = (event, msgs)
self.dp.send_msg(stats)
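        # The stats reply handler is expected to append replies to `msgs` and set
        # `event`; on timeout the pending waiter entry is discarded and the (then
        # possibly empty) message list is returned.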
try:
event.wait(timeout=OFP_REPLY_TIMER)
except hub.Timeout:
del waiters_per_dp[stats.xid]
return msgs
@OfCtl.register_of_version(ofproto_v1_0.OFP_VERSION)
class OfCtl_v1_0(OfCtl):
def __init__(self, dp, logger):
super(OfCtl_v1_0, self).__init__(dp, logger)
def clear_flows(self):
ofp = self.dp.ofproto
ofp_parser = self.dp.ofproto_parser
mod = ofp_parser.OFPFlowMod(
datapath=self.dp,
match=ofp_parser.OFPMatch(),
cookie=0,
command=ofp.OFPFC_DELETE,
priority=ofp.OFP_DEFAULT_PRIORITY,
actions=[])
self.dp.send_msg(mod)
def get_packetin_inport(self, msg):
return msg.in_port
def get_all_flow(self, waiters):
ofp = self.dp.ofproto
ofp_parser = self.dp.ofproto_parser
match = ofp_parser.OFPMatch(ofp.OFPFW_ALL, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0)
stats = ofp_parser.OFPFlowStatsRequest(self.dp, 0, match,
0xff, ofp.OFPP_NONE)
return self.send_stats_request(stats, waiters)
def get_match_dst_ip(self, match):
return match.nw_dst
def set_flow(self, cookie, priority,
in_port=None,
dl_type=0, dl_src=0, dl_dst=0, dl_vlan=0,
nw_src=0, src_mask=32, nw_dst=0, dst_mask=32,
src_port=0, dst_port=0,
nw_proto=0, idle_timeout=0, hard_timeout=0,
flags=0, actions=None):
ofp = self.dp.ofproto
ofp_parser = self.dp.ofproto_parser
cmd = ofp.OFPFC_ADD
# Match
wildcards = ofp.OFPFW_ALL
if in_port is not None:
wildcards &= ~ofp.OFPFW_IN_PORT
if dl_type:
wildcards &= ~ofp.OFPFW_DL_TYPE
if dl_src:
wildcards &= ~ofp.OFPFW_DL_SRC
if dl_dst:
wildcards &= ~ofp.OFPFW_DL_DST
if dl_vlan:
wildcards &= ~ofp.OFPFW_DL_VLAN
if nw_src:
v = (32 - src_mask) << ofp.OFPFW_NW_SRC_SHIFT | \
~ofp.OFPFW_NW_SRC_MASK
wildcards &= v
nw_src = ipv4_text_to_int(nw_src)
if nw_dst:
v = (32 - dst_mask) << ofp.OFPFW_NW_DST_SHIFT | \
~ofp.OFPFW_NW_DST_MASK
wildcards &= v
nw_dst = ipv4_text_to_int(nw_dst)
if src_port:
wildcards &= ~ofp.OFPFW_TP_SRC
if dst_port:
wildcards &= ~ofp.OFPFW_TP_DST
if nw_proto:
wildcards &= ~ofp.OFPFW_NW_PROTO
match = ofp_parser.OFPMatch(wildcards, in_port, dl_src, dl_dst, dl_vlan, 0,
dl_type, 0, nw_proto,
nw_src, nw_dst, src_port, dst_port)
flags = flags
actions = actions or []
m = ofp_parser.OFPFlowMod(self.dp, match, cookie, cmd,
idle_timeout=idle_timeout, hard_timeout=hard_timeout,
priority=priority, flags=flags, actions=actions)
self.dp.send_msg(m)
def set_routing_flow(self, cookie, priority, outport,
in_port=None, dl_vlan=0,
nw_src=0, src_mask=32, nw_dst=0, dst_mask=32,
src_port=0, dst_port=0, src_mac=0, dst_mac=0,
nw_proto=0, idle_timeout=0, hard_timeout=0,
**dummy):
ofp_parser = self.dp.ofproto_parser
dl_type = ether.ETH_TYPE_IP
flags = self.dp.ofproto.OFPFF_CHECK_OVERLAP
#flags = 0
# Decrement TTL value is not supported at OpenFlow V1.0
actions = []
if src_mac:
actions.append(ofp_parser.OFPActionSetDlSrc(
mac_lib.haddr_to_bin(src_mac)))
if dst_mac:
actions.append(ofp_parser.OFPActionSetDlDst(
mac_lib.haddr_to_bin(dst_mac)))
if outport is not None:
actions.append(ofp_parser.OFPActionOutput(outport))
self.set_flow(cookie, priority,
in_port=in_port, dl_type=dl_type, dl_vlan=dl_vlan,
nw_src=nw_src, src_mask=src_mask,
nw_dst=nw_dst, dst_mask=dst_mask,
src_port=src_port, dst_port=dst_port,
nw_proto=nw_proto,
idle_timeout=idle_timeout, hard_timeout=hard_timeout,
flags=flags, actions=actions)
def delete_flow(self, flow_stats):
match = flow_stats.match
cookie = flow_stats.cookie
cmd = self.dp.ofproto.OFPFC_DELETE_STRICT
priority = flow_stats.priority
actions = []
flow_mod = self.dp.ofproto_parser.OFPFlowMod(
self.dp, match, cookie, cmd, priority=priority, actions=actions)
self.dp.send_msg(flow_mod)
self.logger.info('Delete flow [cookie=0x%x]', cookie)
class OfCtl_after_v1_2(OfCtl):
def __init__(self, dp, logger):
super(OfCtl_after_v1_2, self).__init__(dp, logger)
def set_sw_config_for_ttl(self):
pass
def clear_flows(self):
ofp = self.dp.ofproto
ofp_parser = self.dp.ofproto_parser
mod = ofp_parser.OFPFlowMod(self.dp, 0, 0, ofp.OFPTT_ALL,
ofp.OFPFC_DELETE, 0, 0, 1, ofp.OFPCML_NO_BUFFER,
ofp.OFPP_ANY, ofp.OFPG_ANY, 0, ofp_parser.OFPMatch(), [])
self.dp.send_msg(mod)
def get_packetin_inport(self, msg):
in_port = self.dp.ofproto.OFPP_ANY
for match_field in msg.match.fields:
if match_field.header == self.dp.ofproto.OXM_OF_IN_PORT:
in_port = match_field.value
break
return in_port
def get_all_flow(self, waiters):
pass
def get_match_dst_ip(self, match):
return match.ipv4_dst
def set_flow(self, cookie, priority,
in_port=None,
dl_type=0, dl_src=0, dl_dst=0, dl_vlan=0,
nw_src=0, src_mask=32, nw_dst=0, dst_mask=32,
src_port=0, dst_port=0,
nw_proto=0, idle_timeout=0, hard_timeout=0,
flags=0, actions=None):
ofp = self.dp.ofproto
ofp_parser = self.dp.ofproto_parser
cmd = ofp.OFPFC_ADD
table_id = 0 # The default is table 0
# Match
match = ofp_parser.OFPMatch()
if in_port is not None:
match.set_in_port(in_port)
if dl_type:
match.set_dl_type(dl_type)
if dl_type == ether.ETH_TYPE_IP:
table_id = 1
if dl_src:
match.set_dl_src(dl_src)
table_id = 0
if dl_dst:
match.set_dl_dst(dl_dst)
table_id = 0
if dl_vlan:
match.set_vlan_vid(dl_vlan)
if nw_src:
match.set_ipv4_src_masked(ipv4_text_to_int(nw_src),
mask_ntob(src_mask))
table_id = 1
if nw_dst:
match.set_ipv4_dst_masked(ipv4_text_to_int(nw_dst),
mask_ntob(dst_mask))
table_id = 1
if nw_proto:
if dl_type == ether.ETH_TYPE_IP:
match.set_ip_proto(nw_proto)
table_id = 1
if src_port:
if nw_proto == inet.IPPROTO_TCP:
match.set_tcp_src(src_port)
elif nw_proto == inet.IPPROTO_UDP:
match.set_udp_src(src_port)
if dst_port:
if nw_proto == inet.IPPROTO_TCP:
match.set_tcp_dst(dst_port)
elif nw_proto == inet.IPPROTO_UDP:
match.set_udp_dst(dst_port)
elif dl_type == ether.ETH_TYPE_ARP:
match.set_arp_opcode(nw_proto)
# FIXME: We're working around the fact that our Aristas have 1 hardware table, and our
# Ciscos have 2, in OF 1.3 mode.
# Right now, we check the number of tables we matched to the datapath.
# What *should* we be doing? Checking table features, and being more clever.
if self.dp.n_tables == 1:
table_id = 0
# Instructions
flags = flags
actions = actions or []
inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
actions)]
m = ofp_parser.OFPFlowMod(self.dp, cookie, 0, table_id, cmd,
idle_timeout, hard_timeout,
priority, UINT32_MAX, ofp.OFPP_ANY,
ofp.OFPG_ANY, flags, match, inst)
self.dp.send_msg(m)
def set_routing_flow(self, cookie, priority, outport,
in_port=None, dl_vlan=0,
nw_src=0, src_mask=32, nw_dst=0, dst_mask=32,
src_port=0, dst_port=0, src_mac=0, dst_mac=0,
nw_proto=0, idle_timeout=0, hard_timeout=0,
dec_ttl=False):
ofp = self.dp.ofproto
ofp_parser = self.dp.ofproto_parser
dl_type = ether.ETH_TYPE_IP
flags = self.dp.ofproto.OFPFF_CHECK_OVERLAP
#flags = 0
actions = []
if dec_ttl:
actions.append(ofp_parser.OFPActionDecNwTtl())
if src_mac:
actions.append(ofp_parser.OFPActionSetField(eth_src=src_mac))
if dst_mac:
actions.append(ofp_parser.OFPActionSetField(eth_dst=dst_mac))
if outport is not None:
actions.append(ofp_parser.OFPActionOutput(outport, 0))
self.set_flow(cookie, priority,
in_port=in_port, dl_type=dl_type, dl_vlan=dl_vlan,
nw_src=nw_src, src_mask=src_mask,
nw_dst=nw_dst, dst_mask=dst_mask,
src_port=src_port, dst_port=dst_port,
nw_proto=nw_proto,
idle_timeout=idle_timeout, hard_timeout=hard_timeout,
flags=flags, actions=actions)
def delete_flow(self, flow_stats):
ofp = self.dp.ofproto
ofp_parser = self.dp.ofproto_parser
cmd = ofp.OFPFC_DELETE
cookie = flow_stats.cookie
cookie_mask = UINT64_MAX
match = ofp_parser.OFPMatch()
inst = []
flow_mod = ofp_parser.OFPFlowMod(self.dp, cookie, cookie_mask, ofp.OFPTT_ALL, cmd,
0, 0, 0, UINT32_MAX, ofp.OFPP_ANY,
ofp.OFPG_ANY, 0, match, inst)
self.dp.send_msg(flow_mod)
self.logger.info('Delete flow [cookie=0x%x]', cookie)
@OfCtl.register_of_version(ofproto_v1_2.OFP_VERSION)
class OfCtl_v1_2(OfCtl_after_v1_2):
def __init__(self, dp, logger):
super(OfCtl_v1_2, self).__init__(dp, logger)
def set_sw_config_for_ttl(self):
flags = self.dp.ofproto.OFPC_INVALID_TTL_TO_CONTROLLER
miss_send_len = UINT16_MAX
m = self.dp.ofproto_parser.OFPSetConfig(self.dp, flags,
miss_send_len)
self.dp.send_msg(m)
self.logger.info('Set SW config for TTL error packet in.')
def get_all_flow(self, waiters):
ofp = self.dp.ofproto
ofp_parser = self.dp.ofproto_parser
match = ofp_parser.OFPMatch()
stats = ofp_parser.OFPFlowStatsRequest(self.dp, ofp.OFPTT_ALL, ofp.OFPP_ANY,
ofp.OFPG_ANY, 0, 0, match)
return self.send_stats_request(stats, waiters)
@OfCtl.register_of_version(ofproto_v1_3.OFP_VERSION)
class OfCtl_v1_3(OfCtl_after_v1_2):
def __init__(self, dp, logger):
super(OfCtl_v1_3, self).__init__(dp, logger)
def set_sw_config_for_ttl(self):
packet_in_mask = (1 << self.dp.ofproto.OFPR_ACTION |
1 << self.dp.ofproto.OFPR_INVALID_TTL)
port_status_mask = (1 << self.dp.ofproto.OFPPR_ADD |
1 << self.dp.ofproto.OFPPR_DELETE |
1 << self.dp.ofproto.OFPPR_MODIFY)
flow_removed_mask = (1 << self.dp.ofproto.OFPRR_IDLE_TIMEOUT |
1 << self.dp.ofproto.OFPRR_HARD_TIMEOUT |
1 << self.dp.ofproto.OFPRR_DELETE)
m = self.dp.ofproto_parser.OFPSetAsync(
self.dp, [packet_in_mask, 0], [port_status_mask, 0],
[flow_removed_mask, 0])
self.dp.send_msg(m)
self.logger.info('Set SW config for TTL error packet in.')
def get_all_flow(self, waiters):
ofp = self.dp.ofproto
ofp_parser = self.dp.ofproto_parser
match = ofp_parser.OFPMatch()
stats = ofp_parser.OFPFlowStatsRequest(self.dp, 0, ofp.OFPTT_ALL, ofp.OFPP_ANY,
ofp.OFPG_ANY, 0, 0, match)
return self.send_stats_request(stats, waiters)
| apache-2.0 | 3,669,833,747,899,277,000 | 37.171322 | 95 | 0.532686 | false |
Dfenestrator/queryGrapher | queryGrapher.py | 1 | 1457 | from flask.ext.mysql import MySQL
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, jsonify
from contextlib import closing
# configuration
DEBUG = True
# variables for database access
HOST = 'sql3.freemysqlhosting.net'
USER = 'sql3114361'
PASSWD = 'zZl9FPvPV9'
DATABASE = 'sql3114361'
# create application
app = Flask(__name__)
mysql = MySQL()
app.config.from_object(__name__)
app.config['MYSQL_DATABASE_HOST'] = HOST
app.config['MYSQL_DATABASE_USER'] = USER
app.config['MYSQL_DATABASE_PASSWORD'] = PASSWD
app.config['MYSQL_DATABASE_DB'] = DATABASE
mysql.init_app(app)
def connect_db():
return mysql.connect()
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
#Custom query
@app.route('/getData', methods=['GET'])
def getData():
result = None
cursor = g.db.cursor()
    # Run the single user-supplied query and return column headers followed by rows
    query = request.args.get('query')
cursor.execute(query)
# Get column headers
result = [[word[0] for word in cursor.description]]
#get data
queryRows = cursor.fetchall()
for row in queryRows:
result.append(row)
return jsonify(result=result)
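# Hedged usage sketch (the query below is illustrative only):
#   GET /getData?query=SELECT+name,+value+FROM+some_table
# responds with {"result": [[...column headers...], [row 1...], [row 2...], ...]},
# since the first element built above is the list of cursor.description names.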
@app.route('/')
def mainPg():
return render_template('customQuery.html')
if __name__ == '__main__':
app.run()
| mit | 4,756,431,227,061,877,000 | 21.075758 | 103 | 0.678792 | false |
CLVsol/odoo_api | clv_person.py | 1 | 13970 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from __future__ import print_function
from erppeek import *
from base import *
import argparse
import getpass
from clv_address import *
def clv_person_unlink(client, args):
clv_person = client.model('clv_person')
person_browse = clv_person.browse(args)
i = 0
deleted = 0
not_deleted = 0
for person in person_browse:
i += 1
print(i, person.name.encode("utf-8"))
history = client.model('clv_person.history')
history_browse = history.browse([('person_id', '=', person.id), ])
history_ids = history_browse.id
print('>>>>>', history_ids)
history.unlink(history_ids)
try:
clv_person.unlink(person.id)
deleted += 1
except:
print('>>>>>', 'Not deleted!')
not_deleted += 1
print('--> i: ', i)
print('--> deleted: ', deleted)
print('--> not_deleted: ', not_deleted)
def clv_person_unlink_aExcluir(client):
clv_tag = client.model('clv_tag')
tag_aExcluir = clv_tag.browse([('name', '=', 'aExcluir'), ])[0].id
clv_person = client.model('clv_person')
person_browse = clv_person.browse([])
i = 0
deleted = 0
not_deleted = 0
for person in person_browse:
i += 1
print(i, person.name.encode("utf-8"), person.tag_ids.id)
for tag_id in person.tag_ids.id:
if tag_id == tag_aExcluir:
history = client.model('clv_person.history')
history_browse = history.browse([('person_id', '=', person.id), ])
history_ids = history_browse.id
print('>>>>>', history_ids)
history.unlink(history_ids)
try:
clv_person.unlink(person.id)
deleted += 1
except:
print('>>>>>', 'Not deleted!')
not_deleted += 1
print('--> i: ', i)
print('--> deleted: ', deleted)
print('--> not_deleted: ', not_deleted)
def clv_person_import_remote(remote_client, local_client):
clv_address = local_client.model('clv_address')
local_clv_person = local_client.model('clv_person')
remote_clv_person = remote_client.model('clv_person')
remote_person_browse = remote_clv_person.browse([])
i = 0
person_count = 0
address_count = 0
spouse_count = 0
father_count = 0
mother_count = 0
responsible_count = 0
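    # Two passes: the loop below copies every remote person (creating addresses as
    # needed), then a second loop re-reads them by code and links the spouse,
    # father, mother and responsible references to the new local records.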
for person in remote_person_browse:
person_count += 1
print(person_count, person.code, person.name.encode("utf-8"), person.tag_ids, person.category_ids)
print('>>>>>', person.gender, person.birthday)
address_id = False
if person.address_id is not False:
print('>>>>>', person.address_id.name.encode("utf-8"))
if person.address_id.street is not False:
print('>>>>>>>>>>', person.address_id.street.encode("utf-8"),
person.address_id.number)
if person.address_id.district is not False:
print('>>>>>>>>>>', person.address_id.district.encode("utf-8"))
address_id = clv_address.browse([('name', '=', person.address_id.name), ]).id
if address_id == []:
values = {
'name': person.address_id.name,
'street': person.address_id.street,
'number': person.address_id.number,
'district': person.address_id.district,
}
address_id = clv_address.create(values).id
address_count += 1
else:
address_id = address_id[0]
values = {
'name': person.name,
'code': person.code,
'birthday': person.birthday,
'gender': person.gender,
'address_id': address_id,
'date_inclusion': person.date_inclusion,
}
local_person_id = local_clv_person.create(values).id
i = 0
for person in remote_person_browse:
i += 1
local_person = local_clv_person.browse([('code', '=', person.code), ])[0]
print(i, local_person.code, local_person.name.encode("utf-8"))
if person.spouse_id is not False:
spouse_count += 1
spouse = local_clv_person.browse([('code', '=', person.spouse_id.code), ])[0]
print('>>>>> spouse', spouse.code, spouse.name.encode("utf-8"))
values = {
'spouse_id': spouse.id,
}
local_clv_person.write(local_person.id, values)
if person.father_id is not False:
father_count += 1
father = local_clv_person.browse([('code', '=', person.father_id.code), ])[0]
print('>>>>> father', father.code, father.name.encode("utf-8"))
values = {
'father_id': father.id,
}
local_clv_person.write(local_person.id, values)
if person.mother_id is not False:
mother_count += 1
mother = local_clv_person.browse([('code', '=', person.mother_id.code), ])[0]
print('>>>>> mother', mother.code, mother.name.encode("utf-8"))
values = {
'mother_id': mother.id,
}
local_clv_person.write(local_person.id, values)
if person.responsible_id is not False:
responsible_count += 1
responsible = local_clv_person.browse([('code', '=', person.responsible_id.code), ])[0]
print('>>>>> responsible', responsible.code, responsible.name.encode("utf-8"))
values = {
'responsible_id': responsible.id,
}
local_clv_person.write(local_person.id, values)
print('i: ', i)
print('person_count: ', person_count)
print('address_count: ', address_count)
print('spouse_count: ', spouse_count)
print('father_count: ', father_count)
print('mother_count: ', mother_count)
print('responsible_count: ', responsible_count)
def clv_person_check_address(client, args):
tag_id_Verificar_Endereco = get_tag_id(
client,
'Verificar Endereço',
'Registro necessitando verificação do Endereço.')
clv_person = client.model('clv_person')
person_browse = clv_person.browse(args)
i = 0
address_verify = 0
for person in person_browse:
i += 1
print(i, person.name.encode("utf-8"), person.address_id.id,
person.family_member_ids.family_id.address_id[0].id)
if person.address_id.id != person.family_member_ids.family_id.address_id[0].id:
address_verify += 1
values = {
'tag_ids': [(4, tag_id_Verificar_Endereco)],
}
clv_person.write(person.id, values)
print('--> i: ', i)
print('--> address_verify: ', address_verify)
def clv_person_export_jcafb(client, file_path):
headings_person = ['no',
'código da pessoa', 'nome',
'código do paciente', 'data de nascimento',
'endereço',
'categorias',
]
file_person = open(file_path, 'wb')
writer_person = csv.writer(file_person, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
writer_person.writerow(headings_person)
clv_person = client.model('clv_person')
person_browse = clv_person.browse([])
person_count = 0
for person in person_browse:
person_count += 1
print(person_count, person.code, person.name.encode('utf-8'))
patient_code = False
categories = ''
if person.patient_ids.id != []:
patient_code = person.patient_ids[0].code
for category in person.patient_ids[0].category_ids:
if categories == '':
categories = categories + category.name.encode('utf-8')
else:
categories = categories + ',' + category.name.encode('utf-8')
address = ''
if person.address_id is not False:
address = person.address_id.name.encode('utf-8')
row_person = [person_count,
person.code, person.name.encode('utf-8'),
patient_code, person.birthday,
address,
categories,
]
writer_person.writerow(row_person)
file_person.close()
print('person_count: ', person_count)
def get_arguments():
global username
global password
global dbname
global remote_username
global remote_password
global remote_dbname
parser = argparse.ArgumentParser()
parser.add_argument('--user', action="store", dest="username")
parser.add_argument('--pw', action="store", dest="password")
parser.add_argument('--db', action="store", dest="dbname")
parser.add_argument('--ruser', action="store", dest="remote_username")
parser.add_argument('--rpw', action="store", dest="remote_password")
parser.add_argument('--rdb', action="store", dest="remote_dbname")
args = parser.parse_args()
print('%s%s' % ('--> ', args))
if args.dbname is not None:
dbname = args.dbname
elif dbname == '*':
dbname = raw_input('dbname: ')
if args.username is not None:
username = args.username
elif username == '*':
username = raw_input('username: ')
if args.password is not None:
password = args.password
elif password == '*':
password = getpass.getpass('password: ')
if args.remote_dbname is not None:
remote_dbname = args.remote_dbname
elif remote_dbname == '*':
remote_dbname = raw_input('remote_dbname: ')
if args.remote_username is not None:
remote_username = args.remote_username
elif remote_username == '*':
remote_username = raw_input('remote_username: ')
if args.remote_password is not None:
remote_password = args.remote_password
elif remote_password == '*':
remote_password = getpass.getpass('remote_password: ')
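# Hedged invocation sketch (values are placeholders; the flags match the parser above):
#   python clv_person.py --db odoo --user admin --pw secret \
#                        --rdb odoo --ruser admin --rpw secret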
if __name__ == '__main__':
server = 'http://localhost:8069'
# username = 'username'
username = '*'
# password = 'paswword'
password = '*'
dbname = 'odoo'
# dbname = '*'
remote_server = 'http://192.168.25.112:8069'
remote_username = 'username'
# remote_username = '*'
remote_password = 'paswword'
# remote_password = '*'
remote_dbname = 'odoo'
# remote_dbname = '*'
get_arguments()
from time import time
start = time()
print('--> clv_person.py...')
client = erppeek.Client(server, dbname, username, password)
# remote_client = erppeek.Client(remote_server, remote_dbname, remote_username, remote_password)
# person_args = []
# print('-->', client, person_args)
# print('--> Executing clv_person_unlink("new")...')
# clv_person_unlink(client, person_args)
# print('-->', client)
# print('--> Executing clv_person_unlink_aExcluir()...')
# clv_person_unlink_aExcluir(client)
# address_args = []
# print('-->', client, address_args)
# print('--> Executing clv_address_unlink("new")...')
# clv_address_unlink(client, address_args)
# print('-->', remote_client, client)
# print('--> Executing clv_person_import_remote()...')
# clv_person_import_remote(remote_client, client)
# person_args = [('address_id', '!=', False),
# ('family_member_ids', '!=', False),
# ]
# print('-->', client, person_args)
# print('--> Executing clv_person_check_address()...')
# clv_person_check_address(client, person_args)
# file_path = '/opt/openerp/jcafb/data/Lista_de_Campanha_2016-01-15.csv'
# print('-->', client, file_path)
# print('--> Executing clv_person_export_jcafb("new")...')
# clv_person_export_jcafb(client, file_path)
# file_path = '/opt/openerp/jcafb/data/Lista_de_Campanha_2016-01-16.csv'
# print('-->', client, file_path)
# print('--> Executing clv_person_export_jcafb("new")...')
# clv_person_export_jcafb(client, file_path)
# file_path = '/opt/openerp/jcafb/data/Lista_de_Campanha_2016-01-23.csv'
# print('-->', client, file_path)
# print('--> Executing clv_person_export_jcafb("new")...')
# clv_person_export_jcafb(client, file_path)
print('--> clv_person.py')
print('--> Execution time:', secondsToStr(time() - start))
| agpl-3.0 | -4,161,943,679,262,339,000 | 33.647643 | 106 | 0.541216 | false |
yupasik/AT | Tests/Settings/11-Antenna.py | 1 | 4586 | # Test name = Settings
# Script dir = R:\Stingray\Tests\Settings\11-Antenna\11-Antenna.py
from time import sleep
from device import handler, updateTestResult
import RC
import UART
import DO
import GRAB
import MOD
import os
from DO import status
def runTest():
status("active")
TestName = "Settings"
ScriptName = "11-Antenna"
ScriptIndex = "11"
Grabber = DO.grab_define()
platform = DO.load_platform()
Modulation = "DVBS"
FEC = "3/4"
SR = "27500000"
Stream = "\\X_0000_00000_MUX_32000_EPG_Software_20130328a.ts"
Frequency = 1476
Modulator = "1"
COM = "COM7"
settings = [ScriptName, ScriptIndex, Grabber, Modulation, FEC, SR, Stream, Frequency, Modulator, COM]
DO.save_settings(settings)
GRAB.start_capture()
MOD.stop(Modulator)
# macros
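    # Remote-control key sequences replayed via RC.push(); each string names the
    # key to press, apparently followed by a repeat count and a delay in
    # milliseconds (e.g. "ok 1 22200").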
searching_from_wizard_general_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_general_english_E501 = ["up 2 3400", "right 1 1000", "down 2 3400", "ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_english_E501 = ["up 3 3400", "right 1 1000", "down 3 3400", "ok 1 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_south_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "down", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_general_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_general_english_ALL = ["up 2 3400", "right 1 1000", "down 2 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "ok 1 5000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_english_ALL = ["up 3 3400", "right 1 1000", "down 3 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200", "down 1 1000", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_south_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "down", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
load_regions_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200"]
load_regions_english_E501 = ["up 2 2400", "right 1 1000", "down 2 2400", "ok 1 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200"]
load_regions_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200"]
load_regions_english_ALL = ["up 2 2400", "right 1 1000", "down 2 2400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200"]
############################ TestCase 1 ##########################################
testcase = 1
status("active")
MOD.play_stream(Modulation, FEC, SR, Stream, Frequency, Modulator)
UART.default_settings()
if platform in ["E501", "E502", "A230"]:
RC.push(searching_from_wizard_general_E501)
else:
RC.push(searching_from_wizard_general_ALL)
UART.start_app("settings")
RC.push(["right 9 3000"])
GRAB.compare(testcase)
############################ TestCase 2 ##########################################
testcase = 2
status("active")
GRAB.compare(testcase)
############################ TestCase 3 ##########################################
testcase = 3
status("active")
GRAB.compare(testcase)
############################ TestCase 4 ##########################################
testcase = 4
status("active")
RC.push(["down", "right 1 3000"])
GRAB.compare(testcase)
############################ TestCase 5 ##########################################
testcase = 5
status("active")
RC.push(["left 1 3000"])
GRAB.compare(testcase)
############################ TestCase 6 ##########################################
testcase = 6
status("active")
RC.push(["ok 1 3000"])
GRAB.compare(testcase)
###################################################################################
status("active")
MOD.stop(Modulator)
GRAB.stop_capture()
| apache-2.0 | -4,923,376,653,249,903,000 | 51.090909 | 218 | 0.551483 | false |
AnObfuscator/WireMockManager | wiremockmanager/test/test_wmm_mock.py | 1 | 6791 | import unittest
import mock
import wiremockmanager.wmm as wmm
from wiremockmanager.workspace import WorkspaceError
from wiremockmanager.wiremock import WireMockError
class WmmMockTest(unittest.TestCase):
def _setup_mocks(self, workspace_mock, wiremock_mock):
# mock methods
workspace_mock.is_valid_directory_structure = mock.Mock()
workspace_mock.is_initialized = mock.Mock()
workspace_mock.initialize = mock.Mock()
workspace_mock.get_dir_for_service = mock.Mock()
workspace_mock.get_log_file_location_for = mock.Mock()
wiremock_mock.start_mocking = mock.Mock()
# preserve exception classes
workspace_mock.WorkspaceError = WorkspaceError
wiremock_mock.WireMockError = WireMockError
# mock default return values
workspace_mock.is_valid_directory_structure.return_value = True
workspace_mock.is_initialized.return_value = True
workspace_mock.get_dir_for_service.return_value = "service/dir"
workspace_mock.get_log_file_location_for.return_value = "some/log/file.log"
wiremock_mock.start_mocking.return_value = object()
@mock.patch('wiremockmanager.wmm.workspace')
@mock.patch('wiremockmanager.wmm.wiremock')
@mock.patch('wiremockmanager.wmm._print_table')
@mock.patch('wiremockmanager.wmm._print_message')
def test_starts_wiremock_and_prints_instance(self, print_msg_mock, print_table_mock, wiremock_mock, workspace_mock):
self._setup_mocks(workspace_mock, wiremock_mock)
wmm.mock(api='test-api', version='test-version', port=1234, https_port=5678)
workspace_mock.is_valid_directory_structure.assert_called_once()
workspace_mock.is_initialized.assert_called_once()
workspace_mock.initialize.assert_not_called()
workspace_mock.get_dir_for_service.assert_called_once_with('test-api', 'test-version')
workspace_mock.get_log_file_location_for.assert_called_once_with('test-api', 'test-version')
wiremock_mock.start_mocking.assert_called_once_with('service/dir', 'some/log/file.log', 1234, 5678, None, None)
print_table_mock.assert_called_once()
print_msg_mock.assert_not_called()
@mock.patch('wiremockmanager.wmm.workspace')
@mock.patch('wiremockmanager.wmm.wiremock')
@mock.patch('wiremockmanager.wmm._print_table')
@mock.patch('wiremockmanager.wmm._print_message')
def test_unavailable_service_dir_prints_error_and_exits(self, print_msg_mock, print_table_mock, wiremock_mock, workspace_mock):
self._setup_mocks(workspace_mock, wiremock_mock)
workspace_mock.get_dir_for_service.side_effect = WorkspaceError('workspace error message')
wmm.mock(api='test-api', version='test-version', port=1234, https_port=5678)
workspace_mock.is_valid_directory_structure.assert_called_once()
workspace_mock.is_initialized.assert_called_once()
workspace_mock.initialize.assert_not_called()
workspace_mock.get_dir_for_service.assert_called_once_with('test-api', 'test-version')
workspace_mock.get_log_file_location_for.assert_not_called()
wiremock_mock.start_mocking.assert_not_called()
print_table_mock.assert_not_called()
print_msg_mock.assert_called_once_with('workspace error message')
@mock.patch('wiremockmanager.wmm.workspace')
@mock.patch('wiremockmanager.wmm.wiremock')
@mock.patch('wiremockmanager.wmm._print_table')
@mock.patch('wiremockmanager.wmm._print_message')
def test_for_wiremock_failure_prints_error_and_exits(self, print_msg_mock, print_table_mock, wiremock_mock, workspace_mock):
self._setup_mocks(workspace_mock, wiremock_mock)
wiremock_mock.start_mocking.side_effect = WireMockError('error.log')
wmm.mock(api='test-api', version='test-version', port=1234, https_port=5678)
workspace_mock.is_valid_directory_structure.assert_called_once()
workspace_mock.is_initialized.assert_called_once()
workspace_mock.initialize.assert_not_called()
workspace_mock.get_dir_for_service.assert_called_once_with('test-api', 'test-version')
workspace_mock.get_log_file_location_for.assert_called_once_with('test-api', 'test-version')
wiremock_mock.start_mocking.assert_called_once_with('service/dir', 'some/log/file.log', 1234, 5678, None, None)
print_table_mock.assert_not_called()
print_msg_mock.assert_called_once_with('Could not start WireMock instance. Please see log file for more details: {}'.format('error.log'))
@mock.patch('wiremockmanager.wmm.workspace')
@mock.patch('wiremockmanager.wmm.wiremock')
@mock.patch('wiremockmanager.wmm._print_table')
@mock.patch('wiremockmanager.wmm._print_message')
def test_invalid_directory_prints_error_and_exits(self, print_msg_mock, print_table_mock, wiremock_mock, workspace_mock):
self._setup_mocks(workspace_mock, wiremock_mock)
workspace_mock.is_valid_directory_structure.return_value = False
wmm.mock(api='test-api', version='test-version', port=1234, https_port=5678)
workspace_mock.is_valid_directory_structure.assert_called_once()
workspace_mock.is_initialized.assert_not_called()
workspace_mock.initialize.assert_not_called()
workspace_mock.get_dir_for_service.assert_not_called()
workspace_mock.get_log_file_location_for.assert_not_called()
wiremock_mock.start_mocking.assert_not_called()
print_table_mock.assert_not_called()
print_msg_mock.assert_called_once_with('Current directory does not appear to be a valid WMM working directory.')
@mock.patch('wiremockmanager.wmm.workspace')
@mock.patch('wiremockmanager.wmm.wiremock')
@mock.patch('wiremockmanager.wmm._print_table')
@mock.patch('wiremockmanager.wmm._print_message')
def test_uninitialized_directory_calls_initialize_and_continues(self, print_msg_mock, print_table_mock, wiremock_mock, workspace_mock):
self._setup_mocks(workspace_mock, wiremock_mock)
workspace_mock.is_initialized.return_value = False
wmm.mock(api='test-api', version='test-version', port=1234, https_port=5678)
workspace_mock.is_valid_directory_structure.assert_called_once()
workspace_mock.is_initialized.assert_called_once()
workspace_mock.initialize.assert_called_once()
workspace_mock.get_dir_for_service.assert_called_once_with('test-api', 'test-version')
workspace_mock.get_log_file_location_for.assert_called_once_with('test-api', 'test-version')
wiremock_mock.start_mocking.assert_called_once_with('service/dir', 'some/log/file.log', 1234, 5678, None, None)
print_table_mock.assert_called_once()
print_msg_mock.assert_not_called() | gpl-3.0 | 3,372,866,712,384,658,400 | 53.774194 | 145 | 0.712119 | false |
clones/python-blinker | tests/test_signals.py | 1 | 10838 | import gc
import sys
import time
import blinker
from nose.tools import assert_raises
jython = sys.platform.startswith('java')
pypy = hasattr(sys, 'pypy_version_info')
def collect_acyclic_refs():
# cpython releases these immediately without a collection
if jython or pypy:
gc.collect()
if jython:
time.sleep(0.1)
class Sentinel(list):
"""A signal receipt accumulator."""
def make_receiver(self, key):
"""Return a generic signal receiver function logging as *key*
When connected to a signal, appends (key, sender, kw) to the Sentinel.
"""
def receiver(*sentby, **kw):
self.append((key, sentby[0], kw))
receiver.func_name = 'receiver_%s' % key
return receiver
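    # Example: after sig.send('abc', x=1), a receiver created with key 'r1'
    # appends ('r1', 'abc', {'x': 1}) to this Sentinel.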
def test_meta_connect():
sentinel = []
def meta_received(sender, **kw):
sentinel.append(dict(kw, sender=sender))
assert not blinker.receiver_connected.receivers
blinker.receiver_connected.connect(meta_received)
assert not sentinel
def receiver(sender, **kw):
pass
sig = blinker.Signal()
sig.connect(receiver)
assert sentinel == [dict(sender=sig,
receiver_arg=receiver,
sender_arg=blinker.ANY,
weak_arg=True)]
blinker.receiver_connected._clear_state()
def _test_signal_signals(sender):
sentinel = Sentinel()
sig = blinker.Signal()
connected = sentinel.make_receiver('receiver_connected')
disconnected = sentinel.make_receiver('receiver_disconnected')
receiver1 = sentinel.make_receiver('receiver1')
receiver2 = sentinel.make_receiver('receiver2')
assert not sig.receiver_connected.receivers
assert not sig.receiver_disconnected.receivers
sig.receiver_connected.connect(connected)
sig.receiver_disconnected.connect(disconnected)
assert sig.receiver_connected.receivers
assert not sentinel
for receiver, weak in [(receiver1, True), (receiver2, False)]:
sig.connect(receiver, sender=sender, weak=weak)
expected = ('receiver_connected',
sig,
dict(receiver=receiver, sender=sender, weak=weak))
assert sentinel[-1] == expected
# disconnect from explicit sender
sig.disconnect(receiver1, sender=sender)
expected = ('receiver_disconnected',
sig,
dict(receiver=receiver1, sender=sender))
assert sentinel[-1] == expected
# disconnect from ANY and all senders (implicit disconnect signature)
sig.disconnect(receiver2)
assert sentinel[-1] == ('receiver_disconnected',
sig,
dict(receiver=receiver2, sender=blinker.ANY))
def test_signal_signals_any_sender():
_test_signal_signals(blinker.ANY)
def test_signal_signals_strong_sender():
_test_signal_signals("squiznart")
def test_signal_weak_receiver_vanishes():
# non-edge-case path for weak receivers is exercised in the ANY sender
# test above.
sentinel = Sentinel()
sig = blinker.Signal()
connected = sentinel.make_receiver('receiver_connected')
disconnected = sentinel.make_receiver('receiver_disconnected')
receiver1 = sentinel.make_receiver('receiver1')
receiver2 = sentinel.make_receiver('receiver2')
sig.receiver_connected.connect(connected)
sig.receiver_disconnected.connect(disconnected)
# explicit disconnect on a weak does emit the signal
sig.connect(receiver1, weak=True)
sig.disconnect(receiver1)
assert len(sentinel) == 2
assert sentinel[-1][2]['receiver'] is receiver1
del sentinel[:]
sig.connect(receiver2, weak=True)
assert len(sentinel) == 1
del sentinel[:] # holds a ref to receiver2
del receiver2
collect_acyclic_refs()
# no disconnect signal is fired
assert len(sentinel) == 0
# and everything really is disconnected
sig.send('abc')
assert len(sentinel) == 0
def test_signal_signals_weak_sender():
sentinel = Sentinel()
sig = blinker.Signal()
connected = sentinel.make_receiver('receiver_connected')
disconnected = sentinel.make_receiver('receiver_disconnected')
receiver1 = sentinel.make_receiver('receiver1')
receiver2 = sentinel.make_receiver('receiver2')
class Sender(object):
"""A weakref-able object."""
sig.receiver_connected.connect(connected)
sig.receiver_disconnected.connect(disconnected)
sender1 = Sender()
sig.connect(receiver1, sender=sender1, weak=False)
# regular disconnect of weak-able sender works fine
sig.disconnect(receiver1, sender=sender1)
assert len(sentinel) == 2
del sentinel[:]
sender2 = Sender()
sig.connect(receiver2, sender=sender2, weak=False)
# force sender2 to go out of scope
del sender2
collect_acyclic_refs()
# no disconnect signal is fired
assert len(sentinel) == 1
# and everything really is disconnected
sig.send('abc')
assert len(sentinel) == 1
def test_meta_connect_failure():
def meta_received(sender, **kw):
raise TypeError('boom')
assert not blinker.receiver_connected.receivers
blinker.receiver_connected.connect(meta_received)
def receiver(sender, **kw):
pass
sig = blinker.Signal()
assert_raises(TypeError, sig.connect, receiver)
assert not sig.receivers
assert not sig._by_receiver
assert sig._by_sender == {blinker.base.ANY_ID: set()}
blinker.receiver_connected._clear_state()
def test_singletons():
ns = blinker.Namespace()
assert not ns
s1 = ns.signal('abc')
assert s1 is ns.signal('abc')
assert s1 is not ns.signal('def')
assert 'abc' in ns
collect_acyclic_refs()
# weak by default, already out of scope
assert 'def' not in ns
del s1
collect_acyclic_refs()
assert 'abc' not in ns
def test_weak_receiver():
sentinel = []
def received(sender, **kw):
sentinel.append(kw)
sig = blinker.Signal()
# XXX: weirdly, under jython an explicit weak=True causes this test
# to fail, leaking a strong ref to the receiver somewhere.
# http://bugs.jython.org/issue1586
if jython:
sig.connect(received) # weak=True by default.
else:
sig.connect(received, weak=True)
del received
collect_acyclic_refs()
assert not sentinel
sig.send()
assert not sentinel
assert not sig.receivers
values_are_empty_sets_(sig._by_receiver)
values_are_empty_sets_(sig._by_sender)
def test_strong_receiver():
sentinel = []
def received(sender):
sentinel.append(sender)
fn_id = id(received)
sig = blinker.Signal()
sig.connect(received, weak=False)
del received
collect_acyclic_refs()
assert not sentinel
sig.send()
assert sentinel
assert [id(fn) for fn in sig.receivers.values()] == [fn_id]
def test_instancemethod_receiver():
sentinel = []
class Receiver(object):
def __init__(self, bucket):
self.bucket = bucket
def received(self, sender):
self.bucket.append(sender)
receiver = Receiver(sentinel)
sig = blinker.Signal()
sig.connect(receiver.received)
assert not sentinel
sig.send()
assert sentinel
del receiver
collect_acyclic_refs()
sig.send()
assert len(sentinel) == 1
def test_filtered_receiver():
sentinel = []
def received(sender):
sentinel.append(sender)
sig = blinker.Signal()
sig.connect(received, 123)
assert not sentinel
sig.send()
assert not sentinel
sig.send(123)
assert sentinel == [123]
sig.send()
assert sentinel == [123]
sig.disconnect(received, 123)
sig.send(123)
assert sentinel == [123]
sig.connect(received, 123)
sig.send(123)
assert sentinel == [123, 123]
sig.disconnect(received)
sig.send(123)
assert sentinel == [123, 123]
def test_filtered_receiver_weakref():
sentinel = []
def received(sender):
sentinel.append(sender)
class Object(object):
pass
obj = Object()
sig = blinker.Signal()
sig.connect(received, obj)
assert not sentinel
sig.send(obj)
assert sentinel == [obj]
del sentinel[:]
del obj
collect_acyclic_refs()
# general index isn't cleaned up
assert sig.receivers
# but receiver/sender pairs are
values_are_empty_sets_(sig._by_receiver)
values_are_empty_sets_(sig._by_sender)
def test_decorated_receiver():
sentinel = []
class Object(object):
pass
obj = Object()
sig = blinker.Signal()
@sig.connect_via(obj)
def receiver(sender, **kw):
sentinel.append(kw)
assert not sentinel
sig.send()
assert not sentinel
sig.send(1)
assert not sentinel
sig.send(obj)
assert sig.receivers
del receiver
collect_acyclic_refs()
assert sig.receivers
def test_no_double_send():
sentinel = []
def received(sender):
sentinel.append(sender)
sig = blinker.Signal()
sig.connect(received, 123)
sig.connect(received)
assert not sentinel
sig.send()
assert sentinel == [None]
sig.send(123)
assert sentinel == [None, 123]
sig.send()
assert sentinel == [None, 123, None]
def test_has_receivers():
received = lambda sender: None
sig = blinker.Signal()
assert not sig.has_receivers_for(None)
assert not sig.has_receivers_for(blinker.ANY)
sig.connect(received, 'xyz')
assert not sig.has_receivers_for(None)
assert not sig.has_receivers_for(blinker.ANY)
assert sig.has_receivers_for('xyz')
class Object(object):
pass
o = Object()
sig.connect(received, o)
assert sig.has_receivers_for(o)
del received
collect_acyclic_refs()
assert not sig.has_receivers_for('xyz')
assert list(sig.receivers_for('xyz')) == []
assert list(sig.receivers_for(o)) == []
sig.connect(lambda sender: None, weak=False)
assert sig.has_receivers_for('xyz')
assert sig.has_receivers_for(o)
assert sig.has_receivers_for(None)
assert sig.has_receivers_for(blinker.ANY)
assert sig.has_receivers_for('xyz')
def test_instance_doc():
sig = blinker.Signal(doc='x')
assert sig.__doc__ == 'x'
sig = blinker.Signal('x')
assert sig.__doc__ == 'x'
def test_named_blinker():
sig = blinker.NamedSignal('squiznart')
assert 'squiznart' in repr(sig)
def values_are_empty_sets_(dictionary):
for val in dictionary.values():
assert val == set()
if sys.version_info < (2, 5):
def test_context_manager_warning():
sig = blinker.Signal()
receiver = lambda sender: None
assert_raises(RuntimeError, sig.temporarily_connected_to, receiver)
| mit | -1,025,974,145,979,513,000 | 23.40991 | 78 | 0.646614 | false |
kornai/pymachine | src/pymachine/wrapper.py | 1 | 8561 | #!/usr/bin/env python
from copy import deepcopy
import cPickle
import logging
import os
import re
import sys
from pymachine.construction import VerbConstruction
from pymachine.sentence_parser import SentenceParser
from pymachine.lexicon import Lexicon
from pymachine.utils import ensure_dir, MachineGraph, MachineTraverser
from pymachine.machine import Machine
from pymachine.spreading_activation import SpreadingActivation
from pymachine.definition_parser import read_defs
from pymachine.sup_dic import supplementary_dictionary_reader as sdreader
from pymachine import np_grammar
class KeyDefaultDict(dict):
def __missing__(self, key):
return key
def jaccard(s1, s2):
try:
return float(len(s1 & s2)) / len(s1 | s2)
except ZeroDivisionError:
return 0.0
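# Example: jaccard({1, 2}, {2, 3}) == 1.0 / 3, and jaccard(set(), set()) == 0.0.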
class Wrapper:
num_re = re.compile(r'^[0-9.,]+$', re.UNICODE)
def __init__(self, cfg, batch=False, include_ext=True):
self.cfg = cfg
self.__read_config()
self.batch = batch
self.wordlist = set()
self.__read_definitions()
if include_ext:
self.get_ext_definitions()
self.__read_supp_dict()
self.reset_lexicon()
def reset_lexicon(self, load_from=None, save_to=None):
if load_from:
self.lexicon = cPickle.load(open(load_from))
else:
self.lexicon = Lexicon()
self.__add_definitions()
self.__add_constructions()
if save_to:
cPickle.dump(self.lexicon, open(save_to, 'w'))
def __read_config(self):
items = dict(self.cfg.items("machine"))
self.def_files = [(s.split(":")[0].strip(), int(s.split(":")[1]))
for s in items["definitions"].split(",")]
self.dep_map_fn = items.get("dep_map")
self.tok2lemma_fn = items.get("tok2lemma")
self.ext_defs_path = items.get("ext_definitions")
self.supp_dict_fn = items.get("supp_dict")
self.plural_fn = items.get("plurals")
def __read_definitions(self):
self.definitions = {}
for file_name, printname_index in self.def_files:
# TODO HACK makefile needed
if (file_name.endswith("generated") and
not os.path.exists(file_name)):
raise Exception(
"A definition file that should be generated" +
" by pymachine/scripts/generate_translation_dict.sh" +
" does not exist: {0}".format(file_name))
if file_name.endswith('pickle'):
logging.info(
'loading 4lang definitions from {}...'.format(file_name))
definitions = cPickle.load(file(file_name))
else:
logging.info('parsing 4lang definitions...')
definitions = read_defs(
file(file_name), printname_index=printname_index,
three_parts=True)
logging.info('dumping 4lang definitions to file...')
f = open('{0}.pickle'.format(file_name), 'w')
cPickle.dump(definitions, f)
for pn, machines in definitions.iteritems():
if pn not in self.definitions:
self.definitions[pn] = machines
else:
self.definitions[pn] |= machines
def __add_definitions(self):
definitions = deepcopy(self.definitions)
self.lexicon.add_static(definitions.itervalues())
self.lexicon.finalize_static()
def __read_supp_dict(self):
self.supp_dict = sdreader(
file(self.supp_dict_fn)) if self.supp_dict_fn else {}
def __add_constructions(self):
for construction in np_grammar.np_rules:
self.lexicon.add_construction(construction)
# add_verb_constructions(self.lexicon, self.supp_dict)
# add_avm_constructions(self.lexicon, self.supp_dict)
def get_ext_definitions(self):
if self.ext_defs_path.endswith('pickle'):
logging.info(
'loading external definitions from {}...'.format(
self.ext_defs_path))
definitions = cPickle.load(file(self.ext_defs_path))
else:
raise Exception("building machines from deps has moved to 4lang")
for word, machine in definitions.iteritems():
if word not in self.definitions:
self.definitions[word] = set([machine])
logging.info('done')
def draw_single_graph(self, word, path):
clean_word = Machine.d_clean(word)
for c, machine in enumerate(self.definitions[word]):
graph = MachineGraph.create_from_machines([machine])
file_name = os.path.join(path, '{0}_{1}.dot'.format(clean_word, c))
with open(file_name, 'w') as file_obj:
file_obj.write(graph.to_dot().encode('utf-8'))
def draw_word_graphs(self):
ensure_dir('graphs/words')
for c, (word, machines) in enumerate(self.definitions.iteritems()):
if c % 1000 == 0:
logging.info("{0}...".format(c))
for i, machine in enumerate(machines):
graph = MachineGraph.create_from_machines([machine])
clean_word = Machine.d_clean(word)
if clean_word[0] == 'X':
clean_word = clean_word[1:]
f = open('graphs/words/{0}_{1}.dot'.format(clean_word, i), 'w')
f.write(graph.to_dot().encode('utf-8'))
def get_def_words(self, stream):
for headword, machines in self.definitions.iteritems():
if headword[0] == '@':
continue
for machine in machines:
def_words = [
word for word in MachineTraverser.get_nodes(machine)
if word[0] not in '=@']
stream.write(
u"{0}\t{1}\n".format(
headword, u"\t".join(def_words)).encode("utf-8"))
def run(self, sentence):
"""Parses a sentence, runs the spreading activation and returns the
messages that have to be sent to the active plugins."""
try:
sp = SentenceParser()
sa = SpreadingActivation(self.lexicon)
machines = sp.parse(sentence)
logging.debug('machines: {}'.format(machines))
logging.debug('machines: {}'.format(
[m for m in machines]))
for machine_list in machines:
for machine in machine_list:
if machine.control.kr['CAT'] == 'VERB':
logging.debug('adding verb construction for {}'.format(
machine))
self.lexicon.add_construction(VerbConstruction(
machine.printname(), self.lexicon, self.supp_dict))
logging.info('constructions: {}'.format(
self.lexicon.constructions))
# results is a list of (url, data) tuples
results = sa.activation_loop(machines)
print 'results:', results
print 'machines:', machines
graph = MachineGraph.create_from_machines(
[m[0] for m in machines], max_depth=1)
f = open('machines.dot', 'w')
f.write(graph.to_dot().encode('utf-8'))
self.lexicon.clear_active()
except Exception, e:
import traceback
traceback.print_exc(e)
raise(e)
return results
def test_plain():
print 'building wrapper...'
w = Wrapper(sys.argv[1])
test_sen = [
([
("The", "the/ART"),
("snake", "snake/NOUN")], 'NP'),
("ate", "eat/VERB<PAST>"),
([
("the", "the/ART"),
("elephant", "elephant/NOUN")], 'NP')]
print 'running...'
w.run(test_sen)
def test_dep():
print 'building wrapper...'
w = Wrapper(sys.argv[1])
for line in sys.stdin:
w.add_dependency(line)
active_machines = w.lexicon.active_machines()
logging.debug('active machines: {}'.format(active_machines))
graph = MachineGraph.create_from_machines(active_machines)
f = open('machines.dot', 'w')
f.write(graph.to_dot().encode('utf-8'))
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s : " +
"%(module)s (%(lineno)s) - %(levelname)s - %(message)s")
w = Wrapper(sys.argv[1])
w.draw_word_graphs()
| mit | -4,532,952,299,784,070,000 | 36.384279 | 79 | 0.559397 | false |
springcoil/pymc3 | pymc3/variational/flows.py | 1 | 17369 | import numpy as np
import theano
from theano import tensor as tt
from pymc3.distributions.dist_math import rho2sd
from pymc3.theanof import change_flags
from .opvi import node_property, collect_shared_to_list
from . import opvi
__all__ = [
'Formula',
'PlanarFlow',
'LocFlow',
'ScaleFlow'
]
class Formula(object):
"""
    Helper class for building normalizing flows from string formulas, with a
    __call__ syntax similar to Flow.__init__
Parameters
----------
formula : str
string representing normalizing flow
e.g. 'planar', 'planar*4', 'planar*4-radial*3', 'planar-radial-planar'
        Only a simple pattern is supported:
1. dash separated flow identifiers
2. star for replication after flow identifier
Methods
-------
__call__(z0, dim, jitter) - initializes and links all flows returning the last one
"""
def __init__(self, formula):
identifiers = formula.lower().replace(' ', '').split('-')
self.formula = '-'.join(identifiers)
identifiers = [idf.split('*') for idf in identifiers]
self.flows = []
for tup in identifiers:
if len(tup) == 1:
self.flows.append(flow_for_short_name(tup[0]))
elif len(tup) == 2:
self.flows.extend([flow_for_short_name(tup[0])] * int(tup[1]))
else:
raise ValueError('Wrong format: %s' % formula)
if len(self.flows) == 0:
raise ValueError('No flows in formula')
def __call__(self, z0=None, dim=None, jitter=.001, params=None, batch_size=None):
if len(self.flows) == 0:
raise ValueError('No flows in formula')
if params is None:
params = dict()
flow = z0
for i, flow_cls in enumerate(self.flows):
flow = flow_cls(dim=dim, jitter=jitter, z0=flow, batch_size=batch_size, **params.get(i, {}))
return flow
def __reduce__(self):
return self.__class__, self.formula
def __latex__(self):
return r'Formula{\mathcal{N}(0, 1) -> %s}' % self.formula
__repr__ = _latex_repr_ = __latex__
def get_param_spec_for(self, **kwargs):
res = dict()
for i, cls in enumerate(self.flows):
res[i] = cls.get_param_spec_for(**kwargs)
return res
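# Minimal usage sketch (illustrative only, not part of the original module):
# how the formula mini-language documented above expands into a chain of
# linked flows. Assumes the 'planar' and 'radial' short names registered below.
def _example_formula_usage(dim=3):
    formula = Formula('planar*2-radial')
    # formula.flows mirrors the dash/star pattern:
    # [PlanarFlow, PlanarFlow, RadialFlow]
    flow = formula(dim=dim, jitter=.001)
    # `flow` is the last flow in the chain; walking .parent recovers the rest.
    return formula.formula, flow.formula  # ('planar*2-radial', 'planar-planar-radial')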
def seems_like_formula(formula):
try:
Formula(formula)
return True
except (ValueError, KeyError):
return False
def seems_like_flow_params(params):
if set(range(len(params))) == set(params):
for p in params.values():
try:
flow_for_params(p)
except KeyError:
return False
else:
return True
else:
return False
class AbstractFlow(object):
shared_params = None
__param_spec__ = dict()
short_name = ''
__param_registry = dict()
__name_registry = dict()
@classmethod
def register(cls, sbcls):
assert frozenset(sbcls.__param_spec__) not in cls.__param_registry, 'Duplicate __param_spec__'
cls.__param_registry[frozenset(sbcls.__param_spec__)] = sbcls
assert sbcls.short_name not in cls.__name_registry, 'Duplicate short_name'
cls.__name_registry[sbcls.short_name] = sbcls
return sbcls
@classmethod
def flow_for_params(cls, params):
if frozenset(params) not in cls.__param_registry:
raise KeyError('No such flow for the following params: {!r}, '
'only the following are supported\n\n{}'
.format(params, cls.__param_registry))
return cls.__param_registry[frozenset(params)]
@classmethod
def flow_for_short_name(cls, name):
if name.lower() not in cls.__name_registry:
raise KeyError('No such flow: {!r}, '
'only the following are supported\n\n{}'
.format(name, cls.__name_registry))
return cls.__name_registry[name.lower()]
def __init__(self, z0=None, dim=None, jitter=.001, batch_size=None, local=False):
self.local = local
self.batch_size = batch_size
self.__jitter = jitter
if isinstance(z0, AbstractFlow):
parent = z0
dim = parent.dim
z0 = parent.forward
else:
parent = None
if dim is not None:
self.dim = dim
else:
raise ValueError('Cannot infer dimension of flow, '
'please provide dim or Flow instance as z0')
if z0 is None:
self.z0 = tt.matrix() # type: tt.TensorVariable
else:
self.z0 = tt.as_tensor(z0)
self.parent = parent
def add_param(self, user=None, name=None, ref=0., dtype='floatX'):
if dtype == 'floatX':
dtype = theano.config.floatX
spec = self.__param_spec__[name]
shape = tuple(eval(s, {'d': self.dim}) for s in spec)
if user is None:
if self.local:
raise opvi.LocalGroupError('Need parameters for local group flow')
if self.batched:
if self.batch_size is None:
raise opvi.BatchedGroupError('Need batch size to infer parameter shape')
shape = (self.batch_size,) + shape
return theano.shared(
np.asarray(np.random.normal(size=shape) * self.__jitter + ref).astype(dtype),
name=name
)
else:
if self.batched:
if self.local or self.batch_size is None:
shape = (-1,) + shape
else:
shape = (self.batch_size,) + shape
return tt.as_tensor(user).reshape(shape)
@property
def params(self):
return collect_shared_to_list(self.shared_params)
@property
def all_params(self):
params = self.params # type: list
current = self
while not current.isroot:
current = current.parent
params.extend(current.params)
return params
@property
@change_flags(compute_test_value='off')
def sum_logdets(self):
dets = [self.logdet]
current = self
while not current.isroot:
current = current.parent
dets.append(current.logdet)
return tt.add(*dets)
@node_property
def forward(self):
raise NotImplementedError
@node_property
def logdet(self):
raise NotImplementedError
@change_flags(compute_test_value='off')
def forward_pass(self, z0):
ret = theano.clone(self.forward, {self.root.z0: z0})
try:
ret.tag.test_value = np.random.normal(
size=z0.tag.test_value.shape
).astype(self.z0.dtype)
except AttributeError:
ret.tag.test_value = self.root.z0.tag.test_value
return ret
__call__ = forward_pass
@property
def root(self):
current = self
while not current.isroot:
current = current.parent
return current
@property
def formula(self):
f = self.short_name
current = self
while not current.isroot:
current = current.parent
f = current.short_name + '-' + f
return f
@property
def isroot(self):
return self.parent is None
@property
def batched(self):
return self.z0.ndim == 3
@classmethod
def get_param_spec_for(cls, **kwargs):
res = dict()
for name, fshape in cls.__param_spec__.items():
res[name] = tuple(eval(s, kwargs) for s in fshape)
return res
def __repr__(self):
return 'Flow{%s}' % self.short_name
def __str__(self):
return self.short_name
flow_for_params = AbstractFlow.flow_for_params
flow_for_short_name = AbstractFlow.flow_for_short_name
class FlowFn(object):
@staticmethod
def fn(*args):
raise NotImplementedError
@staticmethod
def inv(*args):
raise NotImplementedError
@staticmethod
def deriv(*args):
raise NotImplementedError
def __call__(self, *args):
return self.fn(*args)
class LinearFlow(AbstractFlow):
__param_spec__ = dict(u=('d', ), w=('d', ), b=())
@change_flags(compute_test_value='off')
def __init__(self, h, u=None, w=None, b=None, **kwargs):
self.h = h
super(LinearFlow, self).__init__(**kwargs)
u = self.add_param(u, 'u')
w = self.add_param(w, 'w')
b = self.add_param(b, 'b')
self.shared_params = dict(u=u, w=w, b=b)
self.u_, self.w_ = self.make_uw(self.u, self.w)
u = property(lambda self: self.shared_params['u'])
w = property(lambda self: self.shared_params['w'])
b = property(lambda self: self.shared_params['b'])
def make_uw(self, u, w):
raise NotImplementedError('Need to implement valid U, W transform')
@node_property
def forward(self):
z = self.z0 # sxd
u = self.u_ # d
w = self.w_ # d
b = self.b # .
h = self.h # f
# h(sxd \dot d + .) = s
if not self.batched:
hwz = h(z.dot(w) + b) # s
# sxd + (s \outer d) = sxd
z1 = z + tt.outer(hwz, u) # sxd
return z1
else:
z = z.swapaxes(0, 1)
# z bxsxd
# u bxd
# w bxd
b = b.dimshuffle(0, 'x')
# b bx-
hwz = h(tt.batched_dot(z, w) + b) # bxs
# bxsxd + (bxsx- * bx-xd) = bxsxd
hwz = hwz.dimshuffle(0, 1, 'x') # bxsx-
u = u.dimshuffle(0, 'x', 1) # bx-xd
z1 = z + hwz * u # bxsxd
return z1.swapaxes(0, 1) # sxbxd
@node_property
def logdet(self):
z = self.z0 # sxd
u = self.u_ # d
w = self.w_ # d
b = self.b # .
deriv = self.h.deriv # f'
if not self.batched:
# f'(sxd \dot d + .) * -xd = sxd
phi = deriv(z.dot(w) + b).dimshuffle(0, 'x') * w.dimshuffle('x', 0)
# \abs(. + sxd \dot d) = s
det = tt.abs_(1. + phi.dot(u))
return tt.log(det)
else:
z = z.swapaxes(0, 1)
b = b.dimshuffle(0, 'x')
# z bxsxd
# u bxd
# w bxd
# b bx-x-
# f'(bxsxd \bdot bxd + bx-x-) * bx-xd = bxsxd
phi = deriv(tt.batched_dot(z, w) + b).dimshuffle(0, 1, 'x') * w.dimshuffle(0, 'x', 1)
# \abs(. + bxsxd \bdot bxd) = bxs
det = tt.abs_(1. + tt.batched_dot(phi, u)) # bxs
return tt.log(det).sum(0) # s
class Tanh(FlowFn):
fn = tt.tanh
inv = tt.arctanh
@staticmethod
def deriv(*args):
x, = args
return 1. - tt.tanh(x) ** 2
@AbstractFlow.register
class PlanarFlow(LinearFlow):
short_name = 'planar'
def __init__(self, **kwargs):
super(PlanarFlow, self).__init__(h=Tanh(), **kwargs)
def make_uw(self, u, w):
if not self.batched:
# u_ : d
# w_ : d
wu = u.dot(w) # .
mwu = -1. + tt.nnet.softplus(wu) # .
# d + (. - .) * d / .
u_h = (
u+(mwu-wu) *
w/((w**2).sum()+1e-10)
)
return u_h, w
else:
# u_ : bxd
# w_ : bxd
wu = (u*w).sum(-1, keepdims=True) # bx-
mwu = -1. + tt.nnet.softplus(wu) # bx-
# bxd + (bx- - bx-) * bxd / bx- = bxd
u_h = (
u
+ (mwu - wu)
* w / ((w ** 2).sum(-1, keepdims=True) + 1e-10)
)
return u_h, w
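# Illustrative sketch (not part of the original API): the same unbatched planar
# update and log-determinant as `forward`/`logdet` above, written in plain numpy.
# z is (s, d); u_hat and w are length-d vectors already constrained by make_uw.
def _example_planar_numpy(z, u_hat, w, b):
    hwz = np.tanh(z.dot(w) + b)                      # (s,)
    z1 = z + np.outer(hwz, u_hat)                    # (s, d)
    phi = (1. - hwz ** 2)[:, None] * w[None, :]      # (s, d), tanh'(x) = 1 - tanh(x)**2
    logdet = np.log(np.abs(1. + phi.dot(u_hat)))     # (s,)
    return z1, logdet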
class ReferencePointFlow(AbstractFlow):
__param_spec__ = dict(a=(), b=(), z_ref=('d', ))
@change_flags(compute_test_value='off')
def __init__(self, h, a=None, b=None, z_ref=None, **kwargs):
super(ReferencePointFlow, self).__init__(**kwargs)
a = self.add_param(a, 'a')
b = self.add_param(b, 'b')
if hasattr(self.z0, 'tag') and hasattr(self.z0.tag, 'test_value'):
z_ref = self.add_param(
z_ref, 'z_ref',
ref=self.z0.tag.test_value[0],
dtype=self.z0.dtype
)
else:
z_ref = self.add_param(
z_ref, 'z_ref', dtype=self.z0.dtype
)
self.h = h
self.shared_params = dict(a=a, b=b, z_ref=z_ref)
self.a_, self.b_ = self.make_ab(self.a, self.b)
a = property(lambda self: self.shared_params['a'])
b = property(lambda self: self.shared_params['b'])
z_ref = property(lambda self: self.shared_params['z_ref'])
def make_ab(self, a, b):
raise NotImplementedError('Need to specify how to get a, b')
@node_property
def forward(self):
a = self.a_ # .
b = self.b_ # .
z_ref = self.z_ref # d
z = self.z0 # sxd
h = self.h # h(a, r)
if self.batched:
# a bx-x-
# b bx-x-
# z bxsxd
# z_ref bx-xd
z = z.swapaxes(0, 1)
a = a.dimshuffle(0, 'x', 'x')
b = b.dimshuffle(0, 'x', 'x')
z_ref = z_ref.dimshuffle(0, 'x', 1)
r = (z - z_ref).norm(2, axis=-1, keepdims=True) # sx- (bxsx-)
# global: sxd + . * h(., sx-) * (sxd - sxd) = sxd
# local: bxsxd + b * h(b, bxsx-) * (bxsxd - bxsxd) = bxsxd
z1 = z + b * h(a, r) * (z-z_ref)
if self.batched:
z1 = z1.swapaxes(0, 1)
return z1
@node_property
def logdet(self):
d = float(self.dim)
a = self.a_ # .
b = self.b_ # .
z_ref = self.z_ref # d
z = self.z0 # sxd
h = self.h # h(a, r)
deriv = self.h.deriv # h'(a, r)
if self.batched:
z = z.swapaxes(0, 1)
a = a.dimshuffle(0, 'x', 'x')
b = b.dimshuffle(0, 'x', 'x')
z_ref = z_ref.dimshuffle(0, 'x', 1)
# a bx-x-
# b bx-x-
# z bxsxd
# z_ref bx-xd
r = (z - z_ref).norm(2, axis=-1, keepdims=True) # s
har = h(a, r)
dar = deriv(a, r)
logdet = tt.log((1. + b*har)**(d-1.) * (1. + b*har + b*dar*r))
if self.batched:
return logdet.sum([0, -1])
else:
return logdet.sum(-1)
class Radial(FlowFn):
@staticmethod
def fn(*args):
a, r = args
return 1./(a+r)
@staticmethod
def inv(*args):
a, y = args
return 1./y - a
@staticmethod
def deriv(*args):
a, r = args
return -1. / (a + r) ** 2
@AbstractFlow.register
class RadialFlow(ReferencePointFlow):
short_name = 'radial'
def __init__(self, **kwargs):
super(RadialFlow, self).__init__(Radial(), **kwargs)
def make_ab(self, a, b):
a = tt.exp(a)
b = -a + tt.nnet.softplus(b)
return a, b
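# Illustrative sketch (not part of the original API): the unbatched radial
# transform and log-determinant in plain numpy, with a > 0 and b >= -a as
# enforced by make_ab above. z is (s, d); z_ref is length d; a and b are scalars.
def _example_radial_numpy(z, z_ref, a, b):
    d = z.shape[-1]
    r = np.sqrt(((z - z_ref) ** 2).sum(-1, keepdims=True))   # (s, 1)
    h = 1. / (a + r)                                          # h(a, r)
    dh = -1. / (a + r) ** 2                                   # h'(a, r)
    z1 = z + b * h * (z - z_ref)                              # (s, d)
    logdet = ((d - 1.) * np.log(1. + b * h)
              + np.log(1. + b * h + b * dh * r)).sum(-1)      # (s,)
    return z1, logdet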
@AbstractFlow.register
class LocFlow(AbstractFlow):
__param_spec__ = dict(loc=('d', ))
short_name = 'loc'
def __init__(self, loc=None, **kwargs):
super(LocFlow, self).__init__(**kwargs)
loc = self.add_param(loc, 'loc')
self.shared_params = dict(loc=loc)
loc = property(lambda self: self.shared_params['loc'])
@node_property
def forward(self):
loc = self.loc # (bx)d
z = self.z0 # sx(bx)d
return z + loc
@node_property
def logdet(self):
return tt.zeros((self.z0.shape[0],))
@AbstractFlow.register
class ScaleFlow(AbstractFlow):
__param_spec__ = dict(rho=('d', ))
short_name = 'scale'
@change_flags(compute_test_value='off')
def __init__(self, rho=None, **kwargs):
super(ScaleFlow, self).__init__(**kwargs)
rho = self.add_param(rho, 'rho')
self.scale = rho2sd(rho)
self.shared_params = dict(rho=rho)
    rho = property(lambda self: self.shared_params['rho'])
@node_property
def forward(self):
scale = self.scale # (bx)d
z = self.z0 # sx(bx)d
return z * scale
@node_property
def logdet(self):
return tt.repeat(tt.sum(tt.log(self.scale)), self.z0.shape[0])
@AbstractFlow.register
class HouseholderFlow(AbstractFlow):
__param_spec__ = dict(v=('d', ))
short_name = 'hh'
@change_flags(compute_test_value='raise')
def __init__(self, v=None, **kwargs):
super(HouseholderFlow, self).__init__(**kwargs)
v = self.add_param(v, 'v')
self.shared_params = dict(v=v)
if self.batched:
vv = v.dimshuffle(0, 1, 'x') * v.dimshuffle(0, 'x', 1)
I = tt.eye(self.dim).dimshuffle('x', 0, 1)
vvn = (1e-10+(v**2).sum(-1)).dimshuffle(0, 'x', 'x')
else:
vv = tt.outer(v, v)
I = tt.eye(self.dim)
vvn = ((v**2).sum(-1)+1e-10)
self.H = I - 2. * vv / vvn
@node_property
def forward(self):
z = self.z0 # sxd
H = self.H # dxd
if self.batched:
return tt.batched_dot(z.swapaxes(0, 1), H).swapaxes(0, 1)
else:
return z.dot(H)
@node_property
def logdet(self):
return tt.zeros((self.z0.shape[0],))
| apache-2.0 | 2,401,461,336,131,636,700 | 28.639932 | 104 | 0.510162 | false |
lionheartX/tensorflow-on-ios-tutorial | test.py | 1 | 1624 | import os
import numpy as np
import tensorflow as tf
from sklearn import metrics
checkpoint_dir = "/tmp/voice/"
X_test = np.load("X_test.npy")
y_test = np.load("y_test.npy")
print("Test set size:", X_test.shape)
# To evaluate on the test set: if you have already trained the model, you should
# have a graph.pb file. We use that file to load the computational graph
# instead of re-defining it here.
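# Assumption (not shown in this snippet): the training script exported the graph
# with something like
#   tf.train.write_graph(sess.graph_def, checkpoint_dir, "graph.pb", as_text=False)
# which is what lets ParseFromString() below read it as a binary GraphDef.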
with tf.Session() as sess:
graph_file = os.path.join(checkpoint_dir, "graph.pb")
with tf.gfile.FastGFile(graph_file, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name="")
#print(graph_def.ListFields()) #print what the graph looks like
# You can get_tensor_by_name if you give your nodes explicit names.
W = sess.graph.get_tensor_by_name("model/W:0")
b = sess.graph.get_tensor_by_name("model/b:0")
checkpoint_file = os.path.join(checkpoint_dir, "model")
saver = tf.train.Saver([W, b])
saver.restore(sess, checkpoint_file)
x = sess.graph.get_tensor_by_name("inputs/x-input:0")
y = sess.graph.get_tensor_by_name("inputs/y-input:0")
accuracy = sess.graph.get_tensor_by_name("score/accuracy:0")
inference = sess.graph.get_tensor_by_name("inference/inference:0")
feed = {x: X_test, y: y_test}
print("Test set accuracy:", sess.run(accuracy, feed_dict=feed))
# Additional report using scikit-learn
predictions = sess.run(inference, feed_dict={x: X_test})
print("Classification report:")
print(metrics.classification_report(y_test.ravel(), predictions))
print("Confusion matrix:")
print(metrics.confusion_matrix(y_test.ravel(), predictions)) | mit | 8,695,598,589,253,565,000 | 32.854167 | 79 | 0.71798 | false |
r00219985/python-tools | curl/curl.py | 1 | 1653 | # coding:utf-8
import os
import sys
import pycurl
c = pycurl.Curl()
URL = "http://www.baidu.com/"
c.setopt(pycurl.URL, URL)
# Connection timeout: 5 seconds
c.setopt(pycurl.CONNECTTIMEOUT, 5)
# Transfer (download) timeout: 5 seconds
c.setopt(pycurl.TIMEOUT, 5)
c.setopt(pycurl.FORBID_REUSE, 1)
c.setopt(pycurl.MAXREDIRS, 1)
c.setopt(pycurl.NOPROGRESS, 1)
c.setopt(pycurl.DNS_CACHE_TIMEOUT,30)
indexfile = open(os.path.dirname(os.path.realpath(__file__))+"/content.txt", "wb")
c.setopt(pycurl.WRITEHEADER, indexfile)
c.setopt(pycurl.WRITEDATA, indexfile)
try:
c.perform()
except Exception,e:
print "connecion error:"+str(e)
indexfile.close()
c.close()
sys.exit()
NAMELOOKUP_TIME = c.getinfo(c.NAMELOOKUP_TIME)
CONNECT_TIME = c.getinfo(c.CONNECT_TIME)
PRETRANSFER_TIME = c.getinfo(c.PRETRANSFER_TIME)
STARTTRANSFER_TIME = c.getinfo(c.STARTTRANSFER_TIME)
TOTAL_TIME = c.getinfo(c.TOTAL_TIME)
HTTP_CODE = c.getinfo(c.HTTP_CODE)
SIZE_DOWNLOAD = c.getinfo(c.SIZE_DOWNLOAD)
HEADER_SIZE = c.getinfo(c.HEADER_SIZE)
SPEED_DOWNLOAD = c.getinfo(c.SPEED_DOWNLOAD)
print "HTTP状态码:%s" % (HTTP_CODE)
print "DNS解析时间:%.2f ms" % (NAMELOOKUP_TIME*1000)
print "建立连接时间:%.2f ms" % (CONNECT_TIME*1000)
print "准备传输时间:%.2f ms" % (PRETRANSFER_TIME*1000)
print "传输开始时间:%.2f ms" % (STARTTRANSFER_TIME*1000)
print "传输结束总时间:%.2f ms" % (TOTAL_TIME*1000)
print "下载数据包大小:%d bytes/s" %(SIZE_DOWNLOAD)
print "HTTP头部大小:%d byte" %(HEADER_SIZE)
print "平均下载速度:%d bytes/s" %(SPEED_DOWNLOAD)
indexfile.close()
c.close()
| gpl-3.0 | -2,217,058,960,476,204,500 | 26.471698 | 82 | 0.691849 | false |
nurey/disclosed | app/search.py | 1 | 1979 | #!/usr/bin/env python
# encoding: utf-8
"""
search.py
Created by Ilia Lobsanov on 2008-04-12.
Copyright (c) 2008 Nurey Networks Inc. All rights reserved.
"""
import wsgiref.handlers
import logging
import os
#import pprint
#pp = pprint.PrettyPrinter(indent=4,depth=5)
from google.appengine.ext import webapp, search, db
from google.appengine.ext.db import GqlQuery
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
class Contract(db.Model):
uri = db.StringProperty()
agency_name = db.StringProperty()
vendor_name = db.StringProperty()
reference_number = db.StringProperty()
contract_date = db.StringProperty()
description = db.StringProperty()
contract_period = db.StringProperty()
delivery_date = db.StringProperty()
contract_value = db.StringProperty()
comments = db.TextProperty()
class MainPage(webapp.RequestHandler):
def get(self):
# We use the webapp framework to retrieve the keyword
keyword = self.request.get('keyword')
prop = self.request.get('prop')
self.response.headers['Content-Type'] = 'text/html'
template_values = {
'keyword': keyword,
'CURRENT_VERSION_ID': os.environ['CURRENT_VERSION_ID']
}
if keyword and prop:
query = Contract.all()
query.filter("%s =" % prop, keyword)
results = query.fetch(50)
template_values['results'] = results
elif keyword:
# Search the 'Contract' Entity based on our keyword
query = search.SearchableQuery('Contract')
query.Search(keyword)
results = []
results = query.Get(50)
template_values['results'] = results
else:
pass
path = os.path.join(os.path.dirname(__file__), 'search_results.html')
self.response.out.write(template.render(path, template_values))
application = webapp.WSGIApplication([('/', MainPage)], debug=True)
def main():
logging.getLogger().setLevel(logging.DEBUG)
run_wsgi_app(application)
if __name__ == "__main__":
main()
| mit | 8,614,315,598,677,794,000 | 26.109589 | 71 | 0.703386 | false |
danlamanna/scratch | geonotebook/layers.py | 1 | 14932 | from collections import namedtuple
from collections import OrderedDict
import sys
import six
from . import annotations
from .config import Config
from .vis.utils import discrete_colors, RasterStyleOptions, \
rgba2hex, VectorStyleOptions
BBox = namedtuple('BBox', ['ulx', 'uly', 'lrx', 'lry'])
class GeonotebookLayer(object):
# Control whether or not a layer can be modified
# within a geonotebook layer collection or not. e.g. base OSM
# map layer should not be deleted by the user
_system_layer = False
# _expose_as lets us control whether or not a layer is
# directly exposed as an attribute on a layer collection. It
# is designed for layers added by the system that provide
    # some kind of functionality (e.g. the annotation layer).
_expose_as = None
# A class that serializes the layer's appearance. Defaults
# to RasterStyleOptions and is passed all keyword arguments
# passed to the constructor. This class must provide a
# "serialize" method returning the style representation
# that will be passed to the client.
StyleOptions = RasterStyleOptions
def __init__(self, name, remote, data, **kwargs):
self.config = Config()
self.remote = remote
self._name = name
self._system_layer = kwargs.pop("system_layer", False)
self._expose_as = kwargs.pop("expose_as", None)
self.vis_options = self.StyleOptions(**kwargs)
self.can_subset = False
def __repr__(self):
return "<{}('{}')>".format(
self.__class__.__name__, self.name)
def serialize(self):
return {
'name': self.name,
'vis_url': self.vis_url if hasattr(self, 'vis_url') else None,
'vis_options': self.vis_options.serialize(),
'query_params': self.query_params
}
@property
def name(self):
return self._name
@property
def query_params(self):
return {}
class AnnotationLayer(GeonotebookLayer):
_annotation_types = {
"point": annotations.Point,
"rectangle": annotations.Rectangle,
"polygon": annotations.Polygon
}
def serialize(self):
ret = super(AnnotationLayer, self).serialize()
ret.update({
'annotations': [annot.serialize() for annot in self._annotations]
})
return ret
def __init__(self, name, remote, layer_collection, **kwargs):
kwargs['layer_type'] = 'annotation'
super(AnnotationLayer, self).__init__(name, remote, None, **kwargs)
self.layer_collection = layer_collection
self._remote = remote
self.vis_url = None
self._annotations = []
def add_annotation(self, ann_type, coords, meta):
if ann_type == 'point':
meta['layer'] = self
self._annotations.append(
self._annotation_types[ann_type](coords, **meta))
elif ann_type in self._annotation_types.keys():
meta['layer'] = self
holes = meta.pop('holes', None)
self._annotations.append(
self._annotation_types[ann_type](coords, holes, **meta))
else:
raise RuntimeError("Cannot add annotation of type %s" % ann_type)
def clear_annotations(self):
# clear_annotations on _remote returns the
# number of annotations that were cleared.
# this isn't currently used inside the callback
# but that is the signature of the function.
def _clear_annotations(num):
self._annotations = []
return True
def rpc_error(error):
self.log.error(
"JSONRPCError (%s): %s" % (error['code'], error['message'])
)
def callback_error(exception):
self.log.error("Callback Error: %s" % exception[0])
return self._remote.clear_annotations().then(
_clear_annotations, rpc_error
).catch(callback_error)
@property
def points(self):
return [a for a in self._annotations
if type(a) == self._annotation_types['point']]
@property
def rectangles(self):
return [a for a in self._annotations
if type(a) == self._annotation_types['rectangle']]
@property
def polygons(self):
return [a for a in self._annotations
if type(a) == self._annotation_types['polygon']]
class NoDataLayer(GeonotebookLayer):
def __init__(self, name, remote, vis_url, **kwargs):
super(NoDataLayer, self).__init__(name, remote, None, **kwargs)
self.vis_url = vis_url
class DataLayer(GeonotebookLayer):
def __init__(self, name, remote, data=None, vis_url=None, **kwargs):
# Handle matplotlib like colormap conversion to list of
# dictionarys containing 'color' and 'quantity' keys.
if data is not None:
colormap = kwargs.get("colormap", None)
# If it's a matplotlib-like colormap generate a generic
# list-of-dicts colormap.
if hasattr(colormap, '__call__') and hasattr(colormap, 'N'):
kwargs['colormap'] = self.StyleOptions.get_colormap(
data, colormap, **kwargs)
# if single band and NO colormap, assign the default
# list-of-dicts colormap.
if colormap is None and hasattr(data, 'band_indexes') \
and len(data.band_indexes) == 1:
kwargs['colormap'] = self.StyleOptions.get_colormap(
data, None, **kwargs)
super(DataLayer, self).__init__(name, remote, data, **kwargs)
self.data = data
self.can_subset = True
assert vis_url is not None or data is not None, \
"Must pass in vis_url or data to {}".format(
self.__class__.__name__)
class VectorLayer(GeonotebookLayer):
StyleOptions = VectorStyleOptions
def __init__(self, name, remote, layer_collection, data, **kwargs):
# Here we are storing a reference to the layer collection itself.
# This is done to maintain API compatibility between annotation objects
# and vector data objects. For example, to subset all layers from a
# given geometry, you could use code such as the following:
#
# for layer, data in polygon.data:
# # data a numpy array of the data from "layer"
#
# In this case, the vector object must contain a reference to the
# map layer collection. In a future refactor, we want to change
# this behavior to make passing the layer collection here unnecessary.
# New layer types should avoid using this pattern if possible.
self.layer_collection = layer_collection
# handle styling options in order of precendence
colors = kwargs.get('colors')
if isinstance(colors, (list, tuple)): # a list of colors to use
pass
elif hasattr(colors, '__call__'): # a custom style accessor
kwargs['colors'] = [
rgba2hex(colors(d, i)) for i, d in enumerate(data)
]
elif 'colormap' in kwargs: # a matplotlib colormap
kwargs['colors'] = discrete_colors(kwargs['colormap'], len(data))
name = name or data.reader.name
data.layer = self
super(VectorLayer, self).__init__(name, remote, data, **kwargs)
self.data = data
# In the future, we want to serve this data as vector tiles rather
# than dumping the data directly to the client. This will match
# the existing interface for Raster datasets. Until we can transition
# fully to tiled vector features, we are abusing the interface by
# passing the actual data in place of the visualization url.
@property
def vis_url(self):
return self.data.geojson
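# Styling sketch (hypothetical `remote`, `layers` and `data` objects): the three
# accepted `colors` inputs described above, in order of precedence:
#   VectorLayer(None, remote, layers, data, colors=['#ff0000', '#00ff00'])
#   VectorLayer(None, remote, layers, data,
#               colors=lambda feature, i: (1.0, 0.0, 0.0, 0.5))  # rgba -> hex via rgba2hex
#   VectorLayer(None, remote, layers, data, colormap=some_matplotlib_cmap)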
class SimpleLayer(DataLayer):
def __init__(self, name, remote, data, vis_url=None, **kwargs):
super(SimpleLayer, self).__init__(
name, remote, data=data, vis_url=vis_url, **kwargs
)
if vis_url is None:
self.vis_url = self.config.vis_server.ingest(
self.data, name=self.name, **self.vis_options.serialize())
else:
self.vis_url = vis_url
@property
def name(self):
return "{}_{}".format(
self._name, hash(self.vis_options) + sys.maxsize + 1)
@property
def query_params(self):
return self.config.vis_server.get_params(
self.name, self.data, **self.vis_options.serialize())
def __repr__(self):
return "<{}('{}')>".format(
self.__class__.__name__, self.name.split("_")[0])
class TimeSeriesLayer(DataLayer):
def __init__(self, name, remote, data, vis_url=None, **kwargs):
super(TimeSeriesLayer, self).__init__(
name, remote, data=data, vis_url=None, **kwargs
)
self.__cur = 0
self._vis_urls = [None] * len(data)
self._remote = remote
if vis_url is None:
self._vis_urls[0] = self.config.vis_server.ingest(
self.current, name=self.name, **self.vis_options.serialize())
def __repr__(self):
return "<{}('{}')>".format(
self.__class__.__name__, self.name.split("_")[0])
@property
def vis_url(self):
return self._vis_urls[self._cur]
@property
def name(self):
return "{}_{}_{}".format(
self._name, self.current.name,
hash(self.vis_options) + sys.maxsize + 1)
@property
def query_params(self):
return self.config.vis_server.get_params(
self.current.name, self.current, **self.vis_options.serialize())
@property
def current(self):
return self.data[self._cur]
@property
def _cur(self):
return self.__cur
@_cur.setter
def _cur(self, value):
if value < 0:
raise IndexError("No time slice at index {}!".format(value))
if value >= len(self.data):
raise IndexError("No time slice at index {}!".format(value))
self.__cur = value
if self._vis_urls[value] is None:
self._vis_urls[value] = self.config.vis_server.ingest(
self.current, name=self.name, **self.vis_options.serialize())
def _replace_layer(self, idx):
prev_name = self.name
self._cur = idx
self._remote.replace_layer(prev_name, self.name, self.vis_url,
self.vis_options.serialize(),
self.query_params)\
.then(lambda: True, lambda: True)
return self.current
def idx(self, idx=None):
if idx is None:
return self._cur
else:
return self._replace_layer(idx)
def backward(self):
return self._replace_layer(self._cur - 1)
def forward(self):
return self._replace_layer(self._cur + 1)
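# Usage sketch (illustrative): a TimeSeriesLayer starts at slice 0 and lazily
# ingests each slice the first time it becomes current, e.g.
#   layer.idx()       # -> 0, the current position
#   layer.forward()   # move to slice 1, re-rendering the layer on the map
#   layer.idx(0)      # jump straight back to slice 0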
class GeonotebookLayerCollection(object):
def __init__(self, layers=None):
self._layers = OrderedDict()
self._system_layers = OrderedDict()
if layers is not None:
for l in layers:
self.append(l)
# serialize functions must return a json-serializable data structure
def serialize(self, include_system_layers=True):
ret = {'layers': [],
'system_layers': []}
for name, layer in six.iteritems(self._layers):
if hasattr(layer, 'serialize') and callable(layer.serialize):
ret['layers'].append(layer.serialize())
if include_system_layers:
for name, layer in six.iteritems(self._system_layers):
if hasattr(layer, 'serialize') and callable(layer.serialize):
ret['system_layers'].append(layer.serialize())
return ret
def append(self, value):
if isinstance(value, GeonotebookLayer):
if value._system_layer:
if value.name not in self._system_layers:
self._system_layers[value.name] = value
else:
raise Exception(
"There is already a layer named %s" % value.name
)
else:
if value.name not in self._layers:
self._layers[value.name] = value
else:
raise Exception(
"There is already a layer named %s" % value.name
)
if value._expose_as is not None:
self._expose_layer(value)
else:
raise Exception("Can only append GeonotebookLayers to Collection")
def remove(self, value):
if isinstance(value, six.string_types):
del self._layers[value]
elif isinstance(value, GeonotebookLayer):
del self._layers[value.name]
def find(self, predicate):
"""Find first GeonotebookLayer that matches predicate.
If predicate is not callable, it will check predicate against each
layer name.
"""
if not hasattr(predicate, '__call__'):
try:
return self._layers[predicate]
except KeyError:
return None
try:
# Note that we never find a system layer
return next(l for l in self._layers.values() if predicate(l))
except StopIteration:
return None
def __getitem__(self, value):
if isinstance(value, six.integer_types):
return [
layer for name, layer in six.iteritems(self._layers)
][value]
else:
return self._layers.__getitem__(value)
def __setitem__(self, index, value):
if isinstance(value, GeonotebookLayer):
if value._system_layer:
raise Exception("Cannot add a system layer via __setitem__")
if isinstance(index, six.integer_types):
self.__setitem__(
[
name for name, layer in six.iteritems(self._layers)
][index],
value)
else:
self._layers.__setitem__(index, value)
else:
raise Exception("Can only add GeonotebookLayers to Collection")
def __repr__(self):
return "<GeonotebookLayerCollection({})>".format(
([layer for layer in self._layers.values()]).__repr__())
def __len__(self):
return len(self._layers)
def _expose_layer(self, layer):
if layer._expose_as is not None:
if not hasattr(self, layer._expose_as):
setattr(self, layer._expose_as, layer)
else:
raise RuntimeError(
'Failed exposing "%s", attribute already exists' %
layer._expose_as)
| apache-2.0 | 509,879,575,440,018,900 | 32.706546 | 79 | 0.574404 | false |
SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/readwrite/json_graph/tests/test_cytoscape.py | 1 | 1990 | import json
import pytest
import networkx as nx
from networkx.readwrite.json_graph import cytoscape_data, cytoscape_graph
class TestCytoscape:
def test_graph(self):
G = nx.path_graph(4)
H = cytoscape_graph(cytoscape_data(G))
nx.is_isomorphic(G, H)
def test_graph_attributes(self):
G = nx.path_graph(4)
G.add_node(1, color="red")
G.add_edge(1, 2, width=7)
G.graph["foo"] = "bar"
G.graph[1] = "one"
G.add_node(3, name="node", id="123")
H = cytoscape_graph(cytoscape_data(G))
assert H.graph["foo"] == "bar"
assert H.nodes[1]["color"] == "red"
assert H[1][2]["width"] == 7
assert H.nodes[3]["name"] == "node"
assert H.nodes[3]["id"] == "123"
d = json.dumps(cytoscape_data(G))
H = cytoscape_graph(json.loads(d))
assert H.graph["foo"] == "bar"
assert H.graph[1] == "one"
assert H.nodes[1]["color"] == "red"
assert H[1][2]["width"] == 7
assert H.nodes[3]["name"] == "node"
assert H.nodes[3]["id"] == "123"
def test_digraph(self):
G = nx.DiGraph()
nx.add_path(G, [1, 2, 3])
H = cytoscape_graph(cytoscape_data(G))
assert H.is_directed()
nx.is_isomorphic(G, H)
def test_multidigraph(self):
G = nx.MultiDiGraph()
nx.add_path(G, [1, 2, 3])
H = cytoscape_graph(cytoscape_data(G))
assert H.is_directed()
assert H.is_multigraph()
def test_multigraph(self):
G = nx.MultiGraph()
G.add_edge(1, 2, key="first")
G.add_edge(1, 2, key="second", color="blue")
H = cytoscape_graph(cytoscape_data(G))
assert nx.is_isomorphic(G, H)
assert H[1][2]["second"]["color"] == "blue"
def test_exception(self):
with pytest.raises(nx.NetworkXError):
G = nx.MultiDiGraph()
attrs = dict(name="node", ident="node")
cytoscape_data(G, attrs)
| gpl-3.0 | 8,584,986,071,702,529,000 | 30.587302 | 73 | 0.540704 | false |
sassoftware/mirrorball | updatebot/groupmgr/group.py | 1 | 17094 | #
# Copyright (c) SAS Institute, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Module for modeling the contents of a top level group.
"""
import logging
from conary.deps import deps
from updatebot.errors import FlavorCountMismatchError
from updatebot.errors import UnknownBuildContextError
from updatebot.errors import UnsupportedTroveFlavorError
from updatebot.errors import UnhandledPackageAdditionError
from updatebot.errors import UnknownPackageFoundInManagedGroupError
from updatebot.groupmgr.model import GroupContentsModel
log = logging.getLogger('updatebot.groupmgr')
def require_write(func):
def wrapper(self, *args, **kwargs):
if not hasattr(self, '_readOnly'):
            log.warn('instance has no attribute _readOnly, treating it as read only')
readOnly = True
else:
readOnly = self._readOnly
if readOnly:
raise RuntimeError, 'This group is marked as readonly.'
else:
self._dirty = True
return func(self, *args, **kwargs)
return wrapper
def enforce_readonly(attr):
def set(self, value):
if self._readOnly:
raise RuntimeError, 'This attribute is marked as read only.'
else:
self._dirty = True
setattr(self, attr, value)
def get(self):
return getattr(self, attr)
return property(get, set)
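# Minimal usage sketch (illustrative only, not referenced elsewhere in this
# module): combining the two helpers above on a throwaway managed object.
class _ExampleManaged(object):
    def __init__(self):
        self._readOnly = False
        self._dirty = False
        self._value = 0
    # Reads always work; assignment marks the instance dirty and is refused
    # once _readOnly has been set.
    value = enforce_readonly('_value')
    @require_write
    def bump(self):
        self._value += 1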
class Group(object):
"""
Class for managing group contents.
"""
def __init__(self, cfg, useMap, sanityChecker, groupmgr, pkgGroupName,
groups, errataState, version, conaryVersion):
self._cfg = cfg
self._groups = groups
self._useMap = useMap
self._sanity = sanityChecker
self._mgr = groupmgr
self._pkgGroupName = pkgGroupName
self._errataState = errataState
self._version = version
self._conaryVersion = conaryVersion
self._dirty = False
self._committed = False
self._readOnly = False
errataState = enforce_readonly('_errataState')
version = enforce_readonly('_version')
conaryVersion = enforce_readonly('_conaryVersion')
def setReadOnly(self):
"""
Make this group read only.
"""
self._readOnly = True
@property
def dirty(self):
"""
Check if an instance has been modified in some way.
"""
return self._dirty
@property
def committed(self):
"""
Check if an instances has been marked as committed.
"""
return self._committed or not self._dirty
def setCommitted(self):
"""
Mark this group as committed.
"""
self._dirty = False
self._committed = True
def __hash__(self):
"""
Make groups hashable.
"""
return hash(self._conaryVersion)
def __cmp__(self, other):
"""
Compare groups to other groups.
"""
return cmp(self._conaryVersion, other._conaryVersion)
def __iter__(self):
"""
Iterate over group model instances.
"""
return self._groups.itervalues()
def iteritems(self):
"""
Iterate over groupName, groupModel pairs.
"""
return self._groups.iteritems()
def iterpackages(self):
"""
Iterate over the set of packages in the package group.
"""
return self._groups[self._pkgGroupName]
###
# Start of group manager interface
#
# Since we sever any relation between the group manager and group instance
# at commit time we should avoid the circular reference loop.
###
def commit(self, copyToLatest=False):
"""
Save this group to the repository.
"""
return self._mgr.setGroup(self, copyToLatest=copyToLatest)
def build(self):
"""
Build this group.
"""
return self._mgr.buildGroup(self)
def buildmany(self):
"""
Build this group alongside other groups.
"""
return self._mgr.buildGroup(self, async=True)
def hasBinaryVersion(self):
"""
Check if this group has a binary version.
"""
return self._mgr.hasBinaryVersion(sourceVersion=self.conaryVersion)
###
# end group manager interface
###
@require_write
def _add(self, *args, **kwargs):
"""
Add a trove to the package group contents.
"""
groupName = kwargs.pop('groupName', self._pkgGroupName)
# create package group model if it does not exist.
if groupName not in self._groups:
self._groups[groupName] = GroupContentsModel(groupName)
self._groups[groupName].add(*args, **kwargs)
@require_write
def addPackage(self, name, version, flavors, groupName=None):
"""
Add a package to the model.
@param name: name of the package
@type name: str
@param version: conary version from string object
@type version: conary.versions.VersionFromString
@param flavors: list of flavors
@type flavors: [conary.deps.deps.Flavor, ...]
"""
if not groupName:
groupName = self._pkgGroupName
# Now that versions are actually used for something make sure they
# are always present.
if groupName == self._pkgGroupName:
assert version
assert len(flavors)
flavors = list(flavors)
# Remove all versions and flavors of this name before adding this
# package. This avoids flavor change issues by replacing all flavors.
if self.hasPackage(name):
self.removePackage(name)
plain = deps.parseFlavor('')
x86 = deps.parseFlavor('is: x86')
x86_64 = deps.parseFlavor('is: x86_64')
biarch = deps.parseFlavor('is: x86 x86_64')
# Count the flavors for later use.
flvMap = {}
flvCount = {x86: 0, x86_64: 0, plain: 0, biarch: 0}
for flavor in flavors:
# NOTE: Biarch must come first since a biarch flavored binary also
            # satisfies both x86 and x86_64.
if flavor.satisfies(biarch):
flvCount[biarch] += 1
flvMap[flavor] = 'x86_64'
elif flavor.satisfies(x86):
flvCount[x86] += 1
flvMap[flavor] = 'x86'
elif flavor.satisfies(x86_64):
flvCount[x86_64] += 1
flvMap[flavor] = 'x86_64'
elif flavor.freeze() == '':
flvCount[plain] += 1
flvMap[flavor] = None
else:
raise UnsupportedTroveFlavorError(name=name, flavor=flavor)
def add():
upver = version.trailingRevision().version
for flv in flavors:
primary = (name, upver, flvMap[flv])
secondary = (name, flvMap[flv])
use = self._useMap.get(primary, self._useMap.get(secondary, []))
if use:
for useStr in use:
self._add(name, version=version, flavor=flv,
use=useStr, groupName=groupName)
else:
log.warn('%s=%s[%s] not found in useMap, falling back to '
'old method of adding troves to groups'
% (name, version, flvMap[flv]))
self._add(name, version=version, flavor=flv,
use=flvMap[flv], groupName=groupName)
# If this package has one or two flavors and one of those flavors is
# x86, x86_64, biarch, or plain then handle it like a normal package
# without doing any more sanity checking.
total = 0
for flv, count in flvCount.iteritems():
if count > 1:
break
total += count
else:
if total in (1, 2, 3):
add()
return
# Handle all other odd flavor cases:
# 1. kernels
# 2. kernel modules
# 3. packages with specifically defined flavor sets
# Check if this package is configured to have multiple flavors.
# Get source trove name.
log.info('retrieving trove info for %s' % name)
srcTroveMap = self._mgr._helper._getSourceTroves((name, version, flavors[0]))
srcTroveName = srcTroveMap.keys()[0][0].split(':')[0]
# Check if this is package that we have specifically defined a build
# flavor for.
if srcTroveName in self._cfg.packageFlavors:
# separate packages into x86 and x86_64 by context name
# TODO: If we were really smart we would load the conary
# contexts and see what buildFlavors they contained.
flavorCtxCount = {x86: 0, x86_64: 0, biarch: 0}
ctxMap = dict([ (x, y[1]) for x, y in self._cfg.archContexts if y ])
for context, bldflv in self._cfg.packageFlavors[srcTroveName]:
fltr = ctxMap.get(context, None)
if context in ('i386', 'i486', 'i586', 'i686', 'x86'):
flavorCtxCount[x86] += 1
elif context in ('x86_64', ):
flavorCtxCount[x86_64] += 1
elif context in ('biarch', ):
if fltr and fltr.match(name):
flavorCtxCount[biarch] += 1
else:
raise UnknownBuildContextError(name=name, flavor=context)
# Sanity check flavors to make sure we built all the flavors
# that we expected.
if (flvCount[x86] != flavorCtxCount[x86] or
flvCount[x86_64] != flavorCtxCount[x86_64] or
# Only enforce biarch for packages that we expect to be biarch.
# This is a kluge to deal with the fact that biarch builds
# produce a byDefault=False package for the source that only
# contains the build log.
(flavorCtxCount[biarch] > 0 and
flvCount[biarch] != flavorCtxCount[biarch])):
raise FlavorCountMismatchError(name=name)
# Add packages to the group.
add()
return
# handle kernels.
if srcTroveName == 'kernel' or srcTroveName in self._cfg.kernelModules:
# add all x86ish flavors with use=x86 and all x86_64ish flavors
# with use=x86_64
for flavor in flavors:
if flvMap[flavor] in ('x86', 'x86_64'):
self._add(name, version=version, flavor=flavor,
use=flvMap[flavor], groupName=groupName)
else:
raise UnsupportedTroveFlavorError(name=name, flavor=flavor)
return
# don't know how to deal with this package.
raise UnhandledPackageAdditionError(name=name)
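    # Illustrative note (not in the original source): for a package built for
    # both architectures, `flavors` typically arrives as
    #   [Flavor('is: x86'), Flavor('is: x86_64')]
    # giving flvCount == {x86: 1, x86_64: 1, plain: 0, biarch: 0}, so the early
    # add() path in addPackage runs and neither the packageFlavors handling nor
    # the kernel handling is consulted.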
@require_write
def removePackage(self, name, flavor=None, missingOk=False):
"""
Remove a given trove from the package group contents.
"""
if self._pkgGroupName not in self._groups:
return
group = self._groups[self._pkgGroupName]
#if flavor:
if isinstance(flavor, deps.Flavor):
group.removePackageFlavor(name, flavor.freeze())
else:
group.remove(name, missingOk=missingOk)
def hasPackage(self, name):
"""
Check if a given package name is in the group.
"""
return (self._pkgGroupName in self._groups and
name in self._groups[self._pkgGroupName])
__contains__ = hasPackage
@require_write
def modifyContents(self, additions=None, removals=None):
"""
Modify the contents of the group model by adding and/or removing
packages.
@param additions: dictionary of group names to add packages to.
@type additions: dict(groupName=[(pkgName, PkgFlavor, use), ...])
@param removals: dictionary of group names to remove packages from.
        @type removals: dict(groupName=[(pkgName, PkgFlavor, use), ...])
"""
if additions is None:
additions = {}
if removals is None:
removals = {}
# 1. Apply removals before additions in case we are changing flavors
# 2. If flavor is specified, only modify that single flavor, otherwise
# following normal addition rules as stated in addPackage.
# Remove requested packages.
for groupName, pkgs in removals.iteritems():
group = self._groups[groupName]
for pkgName, pkgFlv, use in pkgs:
if pkgFlv:
group.removePackageFlavor(pkgName, pkgFlv.freeze())
else:
# Need to set missingok to True in case this package was
# never added to the group in the first place because it is
# no longer in the packages group. This tends to happen
# during a group rebuild when regenerating the standard
# group.
group.remove(pkgName, missingOk=True)
# Add requested packages.
for groupName, pkgs in additions.iteritems():
flavoredPackages = {}
for pkgName, pkgFlv, use in pkgs:
                # defer packages with specific flavors for later.
if pkgFlv is not None:
flavoredPackages.setdefault(pkgName, set()).add((pkgFlv, use))
# handle packages where flavor is not specified
else:
# copy packages from the packages group.
for pkg in self._groups[self._pkgGroupName]:
if pkg.name == pkgName:
self._add(pkg.name, version=None,
flavor=pkg.flavor, use=pkg.use,
groupName=groupName)
# Add all specifically flavored packages.
for pkgName, flags in flavoredPackages.iteritems():
for flv, use in flags:
self._add(pkgName, version=None, flavor=flv, use=use,
groupName=groupName)
@require_write
def _copyVersions(self):
"""
Copy versions from the packages group to the other managed groups.
"""
# Get the versions of all packge names.
pkgs = dict([ (x.name, x) for x in self._groups[self._pkgGroupName]
if x.version is not None ])
for group in self:
# skip over package group since it is the version source.
if group.groupName == self._pkgGroupName:
required = [ x for x in group if x.version is None ]
for pkg in required:
if pkg.name in pkgs:
pkg.version = pkgs[pkg.name].version
else:
raise UnknownPackageFoundInManagedGroupError(what=pkg.name)
# for all other groups iterate over contents and set versions to
# match package group.
for pkg in group:
if pkg.name in pkgs:
pkg.version = pkgs[pkg.name].version
else:
raise UnknownPackageFoundInManagedGroupError(what=pkg.name)
def _sanityCheck(self):
"""
Validate the group contents. This will raise an exception if any errors
are found.
"""
self._sanity.check(self._groups, self.errataState)
def _setGroupFlags(self):
"""
Set flags on the group based on the groupContents configuration.
"""
for groupName, groupObj in self._groups.iteritems():
flags = self._cfg.groupContents.get(groupName, {})
for key, value in flags.iteritems():
value = value == 'True' and True or False
setattr(groupObj, key, value)
@require_write
def finalize(self):
"""
        Handle any steps to prepare the group model before saving to disk.
"""
# Copy versions from the package group to all other groups.
self._copyVersions()
# Check the sanity of all group models.
self._sanityCheck()
# Make sure flags on the group match the config.
self._setGroupFlags()
        # Mark as read only.
self.setReadOnly()
| apache-2.0 | 6,035,839,908,508,412,000 | 33.051793 | 85 | 0.570259 | false |
edm1/myopia-education-MR | 1_extract_instruments/merge_weights_to_dosage.py | 1 | 2881 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys
import pandas as pd
from functools import partial
from pprint import pprint
def main():
# Args
indosage = sys.argv[1]
inweights = sys.argv[2]
insample = sys.argv[3]
outf = sys.argv[4]
# Load weights into dict
weight_dict = load_weights(inweights)
# pprint(weight_dict)
# sys.exit()
# Load sample names
samples = load_sample_list(insample)
# Load dosage data
dosage = pd.read_csv(indosage, sep=" ", header=None)
dosage.columns = ["chr", "snpid", "rsid", "pos", "alleleA", "alleleB"] + samples
# Flip weights so that they correspond to 1 increase in alleleB
weights = dosage.apply(partial(match_allele_weights, w=weight_dict), axis=1)
dosage.insert(6, "weightB", weights)
# Write file
dosage.to_csv(outf, sep="\t", index=None)
return 0
def load_sample_list(inf):
""" Loads list of sample names from SNPTEST sample file
"""
sample_list = []
with open(inf, "r") as in_h:
# Skip 2 headers
in_h.readline()
in_h.readline()
# Read all in
for line in in_h:
id1, id2, missing = line.rstrip().split(" ")
if not id1 == id2:
sys.exit("ID1 and ID2 were different in sample file:", id1, id2)
sample_list.append(id1)
return sample_list
def match_allele_weights(row, w):
""" Check that effect allele matches alleleB, if not flip direction of the
weight.
"""
# Get info from dosage row
rsid = row["rsid"]
alleleA = row["alleleA"]
alleleB = row["alleleB"]
# Check that alleles are the same
if not sorted([alleleA, alleleB]) == sorted(list(w[rsid]["alleles"])):
sys.exit(("Error: Alleles don't match for: ", rsid))
# If effect allele is alleleB, return weight otherwise *-1
if alleleB == w[rsid]["alleles"][1]:
return float(w[rsid]["weight"])
else:
return -1*float(w[rsid]["weight"])
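# Worked example (illustrative): if the weights file lists rs123 with
# Other_allele=A, Effect_allele=G and Beta=0.05, a dosage row coded
# alleleA=A / alleleB=G keeps +0.05 (alleleB is the effect allele), while a row
# coded alleleA=G / alleleB=A is flipped to -0.05, so the weight always applies
# per copy of alleleB.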
def load_weights(inf, sep="\t"):
""" For each variant load weight and (Other_allele, Effect_allele) into
a dict. Cols required: MarkerName, Beta, Other_allele, Effect_allele
"""
w = {}
with open(inf, "r") as in_h:
# Load header and discover column indexs
header = in_h.readline().rstrip().split(sep)
cols = {}
for key in ["MarkerName", "Beta", "Other_allele", "Effect_allele"]:
cols[key] = header.index(key)
# For each line save variants, alleles and weight
for line in in_h:
parts = line.rstrip().split(sep)
alleles = (parts[cols["Other_allele"]], parts[cols["Effect_allele"]])
w[parts[cols["MarkerName"]]] = {"weight":parts[cols["Beta"]],
"alleles":alleles}
return w
if __name__ == '__main__':
main()
| mit | -2,071,590,774,918,096,600 | 29.978495 | 84 | 0.585561 | false |
F5Networks/f5-ansible | ansible_collections/f5networks/f5_modules/plugins/modules/bigiq_application_https_waf.py | 1 | 33157 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigiq_application_https_waf
short_description: Manages BIG-IQ HTTPS WAF applications
description:
- Manages BIG-IQ applications used for load balancing an HTTPS application on port 443
with a Web Application Firewall (WAF) using an ASM (Application Security Manager) Rapid Deployment policy.
version_added: "1.0.0"
options:
name:
description:
- Name of the new application.
type: str
required: True
description:
description:
- Description of the application.
type: str
servers:
description:
- A list of servers on which the application is hosted.
- If you are familiar with other BIG-IP settings, you might also refer to this
list as the list of pool members.
- When creating a new application, at least one server is required.
type: list
elements: dict
suboptions:
address:
description:
- The IP address of the server.
type: str
required: True
port:
description:
- The port of the server.
type: str
default: 80
inbound_virtual:
description:
- Settings to configure the virtual which receives the inbound connection.
- This virtual is used to host the HTTPS endpoint of the application.
- Traffic destined to the C(redirect_virtual) is offloaded to this
parameter to ensure proper redirection from insecure to secure occurs.
type: dict
suboptions:
address:
description:
- Specifies destination IP address information to which the virtual server
sends traffic.
- This parameter is required when creating a new application.
type: str
required: True
netmask:
description:
- Specifies the netmask to associate with the given C(destination).
- This parameter is required when creating a new application.
type: str
required: True
port:
description:
- The port on which the virtual listens for connections.
- When creating a new application, if this parameter is not specified, the
default value is C(443).
type: str
default: 443
redirect_virtual:
description:
- Settings to configure the virtual which receives the connection to be
redirected.
- This virtual is used to host the HTTP endpoint of the application.
- Traffic destined to this parameter is offloaded to the
C(inbound_virtual) parameter to ensure proper redirection from insecure
to secure occurs.
type: dict
suboptions:
address:
description:
- Specifies destination IP address information to which the virtual server
sends traffic.
- This parameter is required when creating a new application.
type: str
required: True
netmask:
description:
- Specifies the netmask to associate with the given C(destination).
- This parameter is required when creating a new application.
type: str
required: True
port:
description:
- The port on which the virtual listens for connections.
- When creating a new application, if this parameter is not specified, the
default value of C(80) will be used.
type: str
default: 80
client_ssl_profile:
description:
- Specifies the SSL profile for managing client-side SSL traffic.
type: dict
suboptions:
name:
description:
- The name of the client SSL profile to created and used.
- When creating a new application, if this value is not specified, the
default value of C(clientssl) will be used.
type: str
default: clientssl
cert_key_chain:
description:
- One or more certificates and keys to associate with the SSL profile.
- This option is always a list. The keys in the list dictate the details
of the client/key/chain/passphrase combination.
- BIG-IPs can only have one of each type of each certificate/key
type. This means you can only have one RSA, one DSA, and one ECDSA
per profile.
- If you attempt to assign two RSA, DSA, or ECDSA certificate/key combo,
the device rejects it.
- This list is a complex list that specifies a number of keys.
- When creating a new profile, if this parameter is not specified, the
default value is C(inherit).
type: raw
suboptions:
cert:
description:
- Specifies a cert name for use.
type: str
key:
description:
- Specifies a key name.
type: str
chain:
description:
- Specifies a certificate chain that is relevant to the certificate and
key.
- This key is optional.
type: str
passphrase:
description:
- Contains the passphrase of the key file, should it require one.
- Passphrases are encrypted on the remote BIG-IP device.
type: str
service_environment:
description:
- Specifies the name of service environment the application will be
deployed to.
- When creating a new application, this parameter is required.
type: str
add_analytics:
description:
- Collects statistics of the BIG-IP that the application is deployed to.
- This parameter is only relevant when specifying a C(service_environment) which
is a BIG-IP; not an SSG.
type: bool
default: no
domain_names:
description:
- Specifies host names that are used to access the web application that this
security policy protects.
- When creating a new application, this parameter is required.
type: list
elements: str
state:
description:
- The state of the resource on the system.
- When C(present), guarantees the resource exists with the provided attributes.
- When C(absent), removes the resource from the system.
type: str
choices:
- absent
- present
default: present
wait:
description:
- If the module should wait for the application to be created, deleted, or updated.
type: bool
default: yes
extends_documentation_fragment: f5networks.f5_modules.f5
notes:
- This module does not work on BIG-IQ version 6.1.x or greater.
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Load balance an HTTPS application on port 443 with a WAF using ASM
bigiq_application_https_waf:
name: my-app
description: Redirect HTTP to HTTPS via WAF
service_environment: my-ssg
servers:
- address: 1.2.3.4
port: 8080
- address: 5.6.7.8
port: 8080
inbound_virtual:
address: 2.2.2.2
netmask: 255.255.255.255
port: 443
redirect_virtual:
address: 2.2.2.2
netmask: 255.255.255.255
port: 80
provider:
password: secret
server: lb.mydomain.com
user: admin
state: present
delegate_to: localhost
'''
RETURN = r'''
description:
description: The new description of the application of the resource.
returned: changed
type: str
sample: My application
service_environment:
description: The environment to which the service was deployed.
returned: changed
type: str
sample: my-ssg1
inbound_virtual_destination:
description: The destination of the virtual that was created.
returned: changed
type: str
sample: 6.7.8.9
inbound_virtual_netmask:
description: The network mask of the provided inbound destination.
returned: changed
type: str
sample: 255.255.255.0
inbound_virtual_port:
description: The port on which the inbound virtual address listens.
returned: changed
type: int
sample: 80
servers:
description: List of servers, and their ports, that make up the application.
type: complex
returned: changed
contains:
address:
description: The IP address of the server.
returned: changed
type: str
sample: 2.3.4.5
port:
description: The port on which the server listens.
returned: changed
type: int
sample: 8080
sample: hash/dictionary of values
'''
import time
from datetime import datetime
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, f5_argument_spec, fq_name
)
from ..module_utils.icontrol import bigiq_version
from ..module_utils.ipaddress import is_valid_ip
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {
'templateReference': 'template_reference',
'subPath': 'sub_path',
'ssgReference': 'ssg_reference',
'configSetName': 'config_set_name',
'defaultDeviceReference': 'default_device_reference',
'addAnalytics': 'add_analytics',
'domains': 'domain_names'
}
api_attributes = [
'resources', 'description', 'configSetName', 'subPath', 'templateReference',
'ssgReference', 'defaultDeviceReference', 'addAnalytics', 'domains'
]
returnables = [
'resources', 'description', 'config_set_name', 'sub_path', 'template_reference',
'ssg_reference', 'default_device_reference', 'servers', 'inbound_virtual',
'redirect_virtual', 'client_ssl_profile', 'add_analytics', 'domain_names'
]
updatables = [
'resources', 'description', 'config_set_name', 'sub_path', 'template_reference',
'ssg_reference', 'default_device_reference', 'servers', 'add_analytics', 'domain_names'
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def http_profile(self):
return "profile_http"
@property
def config_set_name(self):
return self.name
@property
def sub_path(self):
return self.name
@property
def template_reference(self):
filter = "name+eq+'Default-f5-HTTPS-WAF-lb-template'"
uri = "https://{0}:{1}/mgmt/cm/global/templates/?$filter={2}&$top=1&$select=selfLink".format(
self.client.provider['server'],
self.client.provider['server_port'],
filter
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
raise F5ModuleError(
"No default HTTP LB template was found."
)
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
result = dict(
link=response['items'][0]['selfLink']
)
return result
@property
def default_device_reference(self):
if is_valid_ip(self.service_environment):
# An IP address was specified
filter = "address+eq+'{0}'".format(self.service_environment)
else:
# Assume a hostname was specified
filter = "hostname+eq+'{0}'".format(self.service_environment)
uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-adccore-allbigipDevices/devices/" \
"?$filter={2}&$top=1&$select=selfLink".format(self.client.provider['server'],
self.client.provider['server_port'], filter)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
return None
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
result = dict(
link=response['items'][0]['selfLink']
)
return result
@property
def ssg_reference(self):
filter = "name+eq+'{0}'".format(self.service_environment)
uri = "https://{0}:{1}/mgmt/cm/cloud/service-scaling-groups/?$filter={2}&$top=1&$select=selfLink".format(
self.client.provider['server'],
self.client.provider['server_port'],
filter
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
return None
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
result = dict(
link=response['items'][0]['selfLink']
)
return result
@property
def domain_names(self):
if self._values['domain_names'] is None:
return None
result = []
for domain in self._values['domain_names']:
result.append(
dict(
domainName=domain
)
)
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def resources(self):
result = dict()
result.update(self.http_profile)
result.update(self.http_monitor)
result.update(self.inbound_virtual_server)
result.update(self.redirect_virtual_server)
result.update(self.pool)
result.update(self.nodes)
result.update(self.ssl_profile)
return result
@property
def inbound_virtual_server(self):
result = dict()
result['ltm:virtual:90735960bf4b'] = [
dict(
parameters=dict(
name='default_vs',
destinationAddress=self.inbound_virtual['address'],
mask=self.inbound_virtual['netmask'],
destinationPort=self.inbound_virtual['port']
),
subcollectionResources=self.inbound_profiles
)
]
return result
@property
def inbound_profiles(self):
result = {
'profiles:78b1bcfdafad': [
dict(
parameters=dict()
)
],
'profiles:2f52acac9fde': [
dict(
parameters=dict()
)
],
'profiles:9448fe71611e': [
dict(
parameters=dict()
)
]
}
return result
@property
def redirect_virtual_server(self):
result = dict()
result['ltm:virtual:3341f412b980'] = [
dict(
parameters=dict(
name='default_redirect_vs',
destinationAddress=self.redirect_virtual['address'],
mask=self.redirect_virtual['netmask'],
destinationPort=self.redirect_virtual['port']
),
subcollectionResources=self.redirect_profiles
)
]
return result
@property
def redirect_profiles(self):
result = {
'profiles:2f52acac9fde': [
dict(
parameters=dict()
)
],
'profiles:9448fe71611e': [
dict(
parameters=dict()
)
]
}
return result
@property
def pool(self):
result = dict()
result['ltm:pool:8bc5b256f9d1'] = [
dict(
parameters=dict(
name='pool_0'
),
subcollectionResources=self.pool_members
)
]
return result
@property
def pool_members(self):
result = dict()
result['members:dec6d24dc625'] = []
for x in self.servers:
member = dict(
parameters=dict(
port=x['port'],
nodeReference=dict(
link='#/resources/ltm:node:c072248f8e6a/{0}'.format(x['address']),
fullPath='# {0}'.format(x['address'])
)
)
)
result['members:dec6d24dc625'].append(member)
return result
@property
def http_profile(self):
result = dict()
result['ltm:profile:http:2f52acac9fde'] = [
dict(
parameters=dict(
name='profile_http'
)
)
]
return result
@property
def http_monitor(self):
result = dict()
result['ltm:monitor:http:18765a198150'] = [
dict(
parameters=dict(
name='monitor-http'
)
)
]
return result
@property
def nodes(self):
result = dict()
result['ltm:node:c072248f8e6a'] = []
for x in self.servers:
tmp = dict(
parameters=dict(
name=x['address'],
address=x['address']
)
)
result['ltm:node:c072248f8e6a'].append(tmp)
return result
@property
def node_addresses(self):
result = [x['address'] for x in self.servers]
return result
@property
def ssl_profile(self):
result = dict()
result['ltm:profile:client-ssl:78b1bcfdafad'] = [
dict(
parameters=dict(
name='clientssl',
certKeyChain=self.cert_key_chains
)
)
]
return result
def _get_cert_references(self):
result = dict()
uri = "https://{0}:{1}/mgmt/cm/adc-core/working-config/sys/file/ssl-cert/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
for cert in response['items']:
key = fq_name(cert['partition'], cert['name'])
result[key] = cert['selfLink']
return result
def _get_key_references(self):
result = dict()
uri = "https://{0}:{1}/mgmt/cm/adc-core/working-config/sys/file/ssl-key/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
for cert in response['items']:
key = fq_name(cert['partition'], cert['name'])
result[key] = cert['selfLink']
return result
@property
def cert_key_chains(self):
result = []
if self.client_ssl_profile is None:
return None
if 'cert_key_chain' not in self.client_ssl_profile:
return None
kc = self.client_ssl_profile['cert_key_chain']
if isinstance(kc, string_types) and kc != 'inherit':
raise F5ModuleError(
"Only the 'inherit' setting is available when 'cert_key_chain' is a string."
)
if not isinstance(kc, list):
raise F5ModuleError(
"The value of 'cert_key_chain' is not one of the supported types."
)
cert_references = self._get_cert_references()
key_references = self._get_key_references()
for idx, x in enumerate(kc):
tmp = dict(
name='clientssl{0}'.format(idx)
)
if 'cert' not in x:
raise F5ModuleError(
"A 'cert' option is required when specifying the 'cert_key_chain' parameter.."
)
elif x['cert'] not in cert_references:
raise F5ModuleError(
"The specified 'cert' was not found. Did you specify its full path?"
)
else:
key = x['cert']
tmp['certReference'] = dict(
link=cert_references[key],
fullPath=key
)
if 'key' not in x:
raise F5ModuleError(
"A 'key' option is required when specifying the 'cert_key_chain' parameter.."
)
elif x['key'] not in key_references:
raise F5ModuleError(
"The specified 'key' was not found. Did you specify its full path?"
)
else:
key = x['key']
tmp['keyReference'] = dict(
link=key_references[key],
fullPath=key
)
            if 'chain' in x:
                if x['chain'] not in cert_references:
                    raise F5ModuleError(
                        "The specified 'chain' was not found. Did you specify its full path?"
                    )
                key = x['chain']
                tmp['chainReference'] = dict(
                    link=cert_references[key],
                    fullPath=key
                )
if 'passphrase' in x:
tmp['passphrase'] = x['passphrase']
result.append(tmp)
return result
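    # Illustrative value returned by cert_key_chains for a single RSA cert/key
    # pair (names are hypothetical; the self-links come from the working-config
    # lookups above):
    #   [{'name': 'clientssl0',
    #     'certReference': {'link': 'https://localhost/...', 'fullPath': '/Common/my-app.crt'},
    #     'keyReference': {'link': 'https://localhost/...', 'fullPath': '/Common/my-app.key'}}]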
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.want.client = self.client
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
self.changes.client = self.client
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
self.changes.client = self.client
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def check_bigiq_version(self, version):
if LooseVersion(version) >= LooseVersion('6.1.0'):
raise F5ModuleError(
'Module supports only BIGIQ version 6.0.x or lower.'
)
def exec_module(self):
start = datetime.now().isoformat()
version = bigiq_version(self.client)
self.check_bigiq_version(version)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.client, self.module, version)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return False
else:
return self.create()
def exists(self):
uri = "https://{0}:{1}/mgmt/ap/query/v1/tenants/default/reports/AllApplicationsList?" \
"$filter=name+eq+'{2}'".format(self.client.provider['server'],
self.client.provider['server_port'], self.want.name)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if (resp.status == 200 and 'result' in response and
'totalItems' in response['result'] and response['result']['totalItems'] == 0):
return False
return True
def remove(self):
if self.module.check_mode:
return True
self_link = self.remove_from_device()
if self.want.wait:
self.wait_for_apply_template_task(self_link)
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def has_no_service_environment(self):
if self.want.default_device_reference is None and self.want.ssg_reference is None:
return True
return False
def create(self):
if self.want.service_environment is None:
raise F5ModuleError(
"A 'service_environment' must be specified when creating a new application."
)
if self.want.servers is None:
raise F5ModuleError(
"At least one 'servers' item is needed when creating a new application."
)
if self.want.inbound_virtual is None:
raise F5ModuleError(
"An 'inbound_virtual' must be specified when creating a new application."
)
if self.want.domain_names is None:
raise F5ModuleError(
"You must provide at least one value in the 'domain_names' parameter."
)
self._set_changed_options()
if self.has_no_service_environment():
raise F5ModuleError(
"The specified 'service_environment' ({0}) was not found.".format(self.want.service_environment)
)
if self.module.check_mode:
return True
self_link = self.create_on_device()
if self.want.wait:
self.wait_for_apply_template_task(self_link)
if not self.exists():
raise F5ModuleError(
"Failed to deploy application."
)
return True
def create_on_device(self):
params = self.changes.api_params()
params['mode'] = 'CREATE'
uri = 'https://{0}:{1}/mgmt/cm/global/tasks/apply-template'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
return response['selfLink']
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
params = dict(
configSetName=self.want.name,
mode='DELETE'
)
uri = 'https://{0}:{1}/mgmt/cm/global/tasks/apply-template'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
return response['selfLink']
def wait_for_apply_template_task(self, self_link):
host = 'https://{0}:{1}'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
uri = self_link.replace('https://localhost', host)
while True:
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if response['status'] == 'FINISHED' and response.get('currentStep', None) == 'DONE':
return True
elif 'errorMessage' in response:
raise F5ModuleError(response['errorMessage'])
time.sleep(5)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
description=dict(),
servers=dict(
type='list',
elements='dict',
options=dict(
address=dict(required=True),
port=dict(default=80)
)
),
inbound_virtual=dict(
type='dict',
options=dict(
address=dict(required=True),
netmask=dict(required=True),
port=dict(default=443)
)
),
redirect_virtual=dict(
type='dict',
options=dict(
address=dict(required=True),
netmask=dict(required=True),
port=dict(default=80)
)
),
service_environment=dict(),
state=dict(
default='present',
choices=['present', 'absent']
),
client_ssl_profile=dict(
type='dict',
options=dict(
name=dict(default='clientssl'),
cert_key_chain=dict(
type='raw',
options=dict(
cert=dict(),
key=dict(),
chain=dict(),
passphrase=dict()
)
)
)
),
add_analytics=dict(type='bool', default='no'),
domain_names=dict(
type='list',
elements='str',
),
wait=dict(type='bool', default='yes')
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 | -8,529,539,818,730,759,000 | 31.128876 | 113 | 0.551528 | false |
jangorecki/h2o-3 | h2o-bindings/bin/gen_R.py | 1 | 23070 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import bindings as bi
import sys
PY3 = sys.version_info[0] == 3
str_type = str if PY3 else (str, unicode)
# ----------------------------------------------------------------------------------------------------------------------
# Generate per-model classes
# ----------------------------------------------------------------------------------------------------------------------
def gen_module(schema, algo):
help_preamble = help_preamble_for(algo)
help_epilogue = help_epilogue_for(algo)
help_afterword = help_afterword_for(algo)
model_name = algo_to_modelname(algo)
help_example = help_example_for(algo)
help_return = help_return_for(algo)
help_references = help_references_for(algo)
class_extra = class_extra_for(algo)
yield "# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_R.py"
yield "# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details) \n#'"
yield "# -------------------------- %s -------------------------- #" % model_name
if help_preamble:
lines = help_preamble.split("\n")
for line in lines:
yield "#' %s" % line.lstrip()
yield "#'"
yield "#' @param x A vector containing the \code{character} names of the predictors in the model."
yield "#' If x is missing,then all columns except y are used."
yield "#' @param y The name of the response variable in the model."
for param in schema["parameters"]:
if param["name"] == "ignored_columns" or param["name"] == "response_column":
            continue
phelp = param["help"]
if param["type"] == "boolean":
phelp = "\code{Logical}. " + phelp
if param["values"]:
phelp += " Must be one of: %s." % ", ".join('"%s"' % p for p in param["values"])
if param["default_value"] is not None:
phelp += " Defaults to %s." % param["default_value"]
yield "#' @param %s %s" % (param["name"], bi.wrap(phelp, indent=("#' "), indent_first=False))
if help_return:
yield "#' @return %s" % bi.wrap(help_return, indent=("#' "), indent_first=False)
if help_epilogue:
yield "#' @seealso %s" % bi.wrap(help_epilogue, indent=("#' "), indent_first=False)
if help_references:
#lines = help_references.split("\n")
#for line in lines:
# yield "#' %s" % line.lstrip()
yield "#' @references %s" % bi.wrap(help_references, indent=("#' "), indent_first=False)
if help_example:
yield "#' @examples"
lines = help_example.split("\n")
for line in lines:
yield "#' %s" % line.lstrip()
yield "#' @export"
yield "h2o.%s <- function(x, y, " % algo
    arg_lines = []
    for param in schema["parameters"]:
        if param["name"] == "ignored_columns" or param["name"] == "response_column":
            continue
        if param["type"][:4] == "enum" or param["default_value"] is not None:
            arg_lines.append(indent("%s = %s" % (param["name"], normalize_value(param)), 17 + len(algo)))
        else:
            arg_lines.append(indent(param["name"], 17 + len(algo)))
    yield ", \n".join(arg_lines)
yield indent(") \n{", 17 + len(algo))
yield " #If x is missing, then assume user wants to use all columns as features."
yield " if(missing(x)){"
yield " if(is.numeric(y)){"
yield " x <- setdiff(col(training_frame),y)"
yield " }else{"
yield " x <- setdiff(colnames(training_frame),y)"
yield " }"
yield " }"
yield ""
yield " # Required args: training_frame"
yield " if( missing(training_frame) ) stop(\"argument \'training_frame\' is missing, with no default\")"
yield " if( missing(validation_frame) ) validation_frame = NULL"
yield " # Training_frame and validation_frame may be a key or an H2OFrame object"
yield " if (!is.H2OFrame(training_frame))"
yield " tryCatch(training_frame <- h2o.getFrame(training_frame),"
yield " error = function(err) {"
yield " stop(\"argument \'training_frame\' must be a valid H2OFrame or key\")"
yield " })"
yield " if (!is.null(validation_frame)) {"
yield " if (!is.H2OFrame(validation_frame))"
yield " tryCatch(validation_frame <- h2o.getFrame(validation_frame),"
yield " error = function(err) {"
yield " stop(\"argument \'validation_frame\' must be a valid H2OFrame or key\")"
yield " })"
yield " }"
yield " # Parameter list to send to model builder"
yield " parms <- list()"
yield " parms$training_frame <- training_frame"
yield " args <- .verify_dataxy(training_frame, x, y, autoencoder)"
yield " if( !missing(offset_column) && !is.null(offset_column)) args$x_ignore <- args$x_ignore[!( offset_column == args$x_ignore )]"
yield " if( !missing(weights_column) && !is.null(weights_column)) args$x_ignore <- args$x_ignore[!( weights_column == args$x_ignore )]"
yield " if( !missing(fold_column) && !is.null(fold_column)) args$x_ignore <- args$x_ignore[!( fold_column == args$x_ignore )]"
yield " parms$response_column <- args$y"
yield " parms$ignored_columns <- args$x_ignore \n "
for param in schema["parameters"]:
if param["name"] == "ignored_columns" or param["name"] == "response_column" or param["name"] == "training_frame":
            continue
if param["name"] == "loss":
yield " if(!missing(loss)) {"
yield " if(loss == \"MeanSquare\") {"
yield " warning(\"Loss name 'MeanSquare' is deprecated; please use 'Quadratic' instead.\")"
yield " parms$loss <- \"Quadratic\""
yield " } else "
yield " parms$loss <- loss"
yield " }"
yield " if (!missing(%s))" % param["name"]
yield " parms$%s <- %s" % (param["name"], param["name"])
yield " .h2o.modelJob('%s', parms, h2oRestApiVersion=3) \n}" % algo
if help_afterword:
lines = help_afterword.split("\n")
for line in lines:
yield "%s" % line.lstrip()
#yield "%s" % bi.wrap(help_afterword, indent=("#' "), indent_first=True)
if class_extra:
yield class_extra
def normalize_value(param):
if param["type"][:4] == "enum":
return "c(%s)" % ", ".join('"%s"' % p for p in param["values"])
if "[]" in param["type"]:
return "c(%s)" % ", ".join('%s' % p for p in param["default_value"])
if param["type"] == "boolean":
return str(param["default_value"]).upper()
return param["default_value"]
def indent(string, indent):
return " " * indent + string;
def algo_to_modelname(algo):
if algo == "deeplearning": return "Deep Learning - Neural Network"
if algo == "deepwater": return "Deep Water - Neural Network"
if algo == "gbm": return "Gradient Boosting Machine"
if algo == "glm": return "H2O Generalized Linear Models"
if algo == "glrm": return "Generalized Low Rank Model"
if algo == "kmeans": return "KMeans Model in H2O"
if algo == "naivebayes": return "Naive Bayes Model in H2O"
if algo == "drf": return "Random Forest Model in H2O"
if algo == "svd": return "Singular Value Decomposition"
if algo == "pca": return "Principal Components Analysis"
return algo
def help_preamble_for(algo):
if algo == "deeplearning":
return """
Build a Deep Neural Network model using CPUs
Builds a feed-forward multilayer artificial neural network on an H2OFrame"""
if algo == "deepwater":
return """
Build a Deep Learning model using multiple native GPU backends
Builds a deep neural network on an H2OFrame containing various data sources"""
if algo == "kmeans":
return """Performs k-means clustering on an H2O dataset."""
if algo == "glrm":
return """Builds a generalized low rank model of a H2O dataset."""
if algo == "glm":
return """
Fits a generalized linear model, specified by a response variable, a set of predictors, and a
description of the error distribution."""
if algo == "gbm":
return """
Builds gradient boosted trees on a parsed data set, for regression or classification.
The default distribution function will guess the model type based on the response column type.
Otherwise, the response column must be an enum for "bernoulli" or "multinomial", and numeric
for all other distributions."""
if algo == "naivebayes":
return """
The naive Bayes classifier assumes independence between predictor variables
conditional on the response, and a Gaussian distribution of numeric predictors with
mean and standard deviation computed from the training dataset. When building a naive
Bayes classifier, every row in the training dataset that contains at least one NA will
be skipped completely. If the test dataset has missing values, then those predictors
are omitted in the probability calculation during prediction."""
def help_epilogue_for(algo):
if algo == "deeplearning":
return """\code{\link{predict.H2OModel}} for prediction"""
if algo == "glm":
return """\code{\link{predict.H2OModel}} for prediction, \code{\link{h2o.mse}}, \code{\link{h2o.auc}}, \code{\link{h2o.confusionMatrix}}, \code{\link{h2o.performance}}, \code{\link{h2o.giniCoef}}, \code{\link{h2o.logloss}}, \code{\link{h2o.varimp}}, \code{\link{h2o.scoreHistory}}"""
if algo == "glrm":
return """\code{\link{h2o.kmeans}, \link{h2o.svd}}, \code{\link{h2o.prcomp}}"""
def help_return_for(algo):
if algo == "glrm":
return "Returns an object of class \linkS4class{H2ODimReductionModel}."
def help_references_for(algo):
if algo == "glrm":
return """M. Udell, C. Horn, R. Zadeh, S. Boyd (2014). {Generalized Low Rank Models}[http://arxiv.org/abs/1410.0342]. Unpublished manuscript, Stanford Electrical Engineering Department; N. Halko, P.G. Martinsson, J.A. Tropp. {Finding structure with randomness: Probabilistic algorithms for constructing approximate matrix decompositions}[http://arxiv.org/abs/0909.4061]. SIAM Rev., Survey and Review section, Vol. 53, num. 2, pp. 217-288, June 2011."""
def help_example_for(algo):
if algo == "deeplearning":
return """\donttest{
library(h2o)
h2o.init()
iris.hex <- as.h2o(iris)
iris.dl <- h2o.deeplearning(x = 1:4, y = 5, training_frame = iris.hex)
# now make a prediction
predictions <- h2o.predict(iris.dl, iris.hex)
}"""
if algo == "glm":
return """\code{\link{predict.H2OModel}} for prediction, \code{\link{h2o.mse}}, \code{\link{h2o.auc}}, \code{\link{h2o.confusionMatrix}}, \code{\link{h2o.performance}}, \code{\link{h2o.giniCoef}}, \code{\link{h2o.logloss}}, \code{\link{h2o.varimp}}, \code{\link{h2o.scoreHistory}}"""
if algo == "glrm":
return """\donttest{
library(h2o)
h2o.init()
ausPath <- system.file("extdata", "australia.csv", package="h2o")
australia.hex <- h2o.uploadFile(path = ausPath)
h2o.glrm(training_frame = australia.hex, k = 5, loss = "Quadratic", regularization_x = "L1",
gamma_x = 0.5, gamma_y = 0, max_iterations = 1000)
}"""
def help_afterword_for(algo):
if algo == "deeplearning":
return """
#' Anomaly Detection via H2O Deep Learning Model
#'
#' Detect anomalies in an H2O dataset using an H2O deep learning model with
#' auto-encoding.
#'
#' @param object An \linkS4class{H2OAutoEncoderModel} object that represents the
#' model to be used for anomaly detection.
#' @param data An H2OFrame object.
#' @param per_feature Whether to return the per-feature squared reconstruction error
#' @return Returns an H2OFrame object containing the
#' reconstruction MSE or the per-feature squared error.
#' @seealso \code{\link{h2o.deeplearning}} for making an H2OAutoEncoderModel.
#' @examples
#' \donttest{
#' library(h2o)
#' h2o.init()
#' prosPath = system.file("extdata", "prostate.csv", package = "h2o")
#' prostate.hex = h2o.importFile(path = prosPath)
#' prostate.dl = h2o.deeplearning(x = 3:9, training_frame = prostate.hex, autoencoder = TRUE,
#' hidden = c(10, 10), epochs = 5)
#' prostate.anon = h2o.anomaly(prostate.dl, prostate.hex)
#' head(prostate.anon)
#' prostate.anon.per.feature = h2o.anomaly(prostate.dl, prostate.hex, per_feature=TRUE)
#' head(prostate.anon.per.feature)
#' }
#' @export
h2o.anomaly <- function(object, data, per_feature=FALSE) {
url <- paste0('Predictions/models/', object@model_id, '/frames/',h2o.getId(data))
res <- .h2o.__remoteSend(url, method = "POST", reconstruction_error=TRUE, reconstruction_error_per_feature=per_feature)
key <- res$model_metrics[[1L]]$predictions$frame_id$name
h2o.getFrame(key)
}
#' Feature Generation via H2O Deep Learning Model
#'
#' Extract the non-linear feature from an H2O data set using an H2O deep learning
#' model.
#' @param object An \linkS4class{H2OModel} object that represents the deep
#' learning model to be used for feature extraction.
#' @param data An H2OFrame object.
#' @param layer Index of the hidden layer to extract.
#' @return Returns an H2OFrame object with as many features as the
#' number of units in the hidden layer of the specified index.
#' @seealso \code{link{h2o.deeplearning}} for making deep learning models.
#' @examples
#' \donttest{
#' library(h2o)
#' h2o.init()
#' prosPath = system.file("extdata", "prostate.csv", package = "h2o")
#' prostate.hex = h2o.importFile(path = prosPath)
#' prostate.dl = h2o.deeplearning(x = 3:9, y = 2, training_frame = prostate.hex,
#' hidden = c(100, 200), epochs = 5)
#' prostate.deepfeatures_layer1 = h2o.deepfeatures(prostate.dl, prostate.hex, layer = 1)
#' prostate.deepfeatures_layer2 = h2o.deepfeatures(prostate.dl, prostate.hex, layer = 2)
#' head(prostate.deepfeatures_layer1)
#' head(prostate.deepfeatures_layer2)
#' }
#' @export
h2o.deepfeatures <- function(object, data, layer = 1) {
index = layer - 1
url <- paste0('Predictions/models/', object@model_id, '/frames/', h2o.getId(data))
res <- .h2o.__remoteSend(url, method = "POST", deep_features_hidden_layer=index, h2oRestApiVersion = 4)
job_key <- res$key$name
dest_key <- res$dest$name
.h2o.__waitOnJob(job_key)
h2o.getFrame(dest_key)
}
"""
if algo == "glm":
return """
#' Set betas of an existing H2O GLM Model
#'
#' This function allows setting betas of an existing glm model.
#' @param model an \linkS4class{H2OModel} corresponding from a \code{h2o.glm} call.
#' @param beta a new set of betas (a named vector)
#' @export
h2o.makeGLMModel <- function(model,beta) {
res = .h2o.__remoteSend(method="POST", .h2o.__GLMMakeModel, model=model@model_id, names = paste("[",paste(paste("\"",names(beta),"\"",sep=""), collapse=","),"]",sep=""), beta = paste("[",paste(as.vector(beta),collapse=","),"]",sep=""))
m <- h2o.getModel(model_id=res$model_id$name)
m@model$coefficients <- m@model$coefficients_table[,2]
names(m@model$coefficients) <- m@model$coefficients_table[,1]
m
}
#' Extract full regularization path from glm model (assuming it was run with lambda search option)
#'
#' @param model an \linkS4class{H2OModel} corresponding from a \code{h2o.glm} call.
#' @export
h2o.getGLMFullRegularizationPath <- function(model) {
res = .h2o.__remoteSend(method="GET", .h2o.__GLMRegPath, model=model@model_id)
colnames(res$coefficients) <- res$coefficient_names
if(!is.null(res$coefficients_std) && length(res$coefficients_std) > 0L) {
colnames(res$coefficients_std) <- res$coefficient_names
}
res
}
##' Start an H2O Generalized Linear Model Job
##'
##' Creates a background H2O GLM job.
##' @inheritParams h2o.glm
##' @return Returns a \linkS4class{H2OModelFuture} class object.
##' @export
#h2o.startGLMJob <- function(x, y, training_frame, model_id, validation_frame,
# #AUTOGENERATED Params
# max_iterations = 50,
# beta_epsilon = 0,
# solver = c("IRLSM", "L_BFGS"),
# standardize = TRUE,
# family = c("gaussian", "binomial", "poisson", "gamma", "tweedie"),
# link = c("family_default", "identity", "logit", "log", "inverse", "tweedie"),
# tweedie_variance_power = NaN,
# tweedie_link_power = NaN,
# alpha = 0.5,
# prior = 0.0,
# lambda = 1e-05,
# lambda_search = FALSE,
# nlambdas = -1,
# lambda_min_ratio = 1.0,
# nfolds = 0,
# beta_constraints = NULL,
# ...
# )
#{
# # if (!is.null(beta_constraints)) {
# # if (!inherits(beta_constraints, "data.frame") && !is.H2OFrame("H2OFrame"))
# # stop(paste("`beta_constraints` must be an H2OH2OFrame or R data.frame. Got: ", class(beta_constraints)))
# # if (inherits(beta_constraints, "data.frame")) {
# # beta_constraints <- as.h2o(beta_constraints)
# # }
# # }
#
# if (!is.H2OFrame(training_frame))
# tryCatch(training_frame <- h2o.getFrame(training_frame),
# error = function(err) {
# stop("argument \"training_frame\" must be a valid H2OFrame or model ID")
# })
#
# parms <- list()
# args <- .verify_dataxy(training_frame, x, y)
# parms$ignored_columns <- args$x_ignore
# parms$response_column <- args$y
# parms$training_frame = training_frame
# parms$beta_constraints = beta_constraints
# if(!missing(model_id))
# parms$model_id <- model_id
# if(!missing(validation_frame))
# parms$validation_frame <- validation_frame
# if(!missing(max_iterations))
# parms$max_iterations <- max_iterations
# if(!missing(beta_epsilon))
# parms$beta_epsilon <- beta_epsilon
# if(!missing(solver))
# parms$solver <- solver
# if(!missing(standardize))
# parms$standardize <- standardize
# if(!missing(family))
# parms$family <- family
# if(!missing(link))
# parms$link <- link
# if(!missing(tweedie_variance_power))
# parms$tweedie_variance_power <- tweedie_variance_power
# if(!missing(tweedie_link_power))
# parms$tweedie_link_power <- tweedie_link_power
# if(!missing(alpha))
# parms$alpha <- alpha
# if(!missing(prior))
# parms$prior <- prior
# if(!missing(lambda))
# parms$lambda <- lambda
# if(!missing(lambda_search))
# parms$lambda_search <- lambda_search
# if(!missing(nlambdas))
# parms$nlambdas <- nlambdas
# if(!missing(lambda_min_ratio))
# parms$lambda_min_ratio <- lambda_min_ratio
# if(!missing(nfolds))
# parms$nfolds <- nfolds
#
# .h2o.startModelJob('glm', parms, h2oRestApiVersion=.h2o.__REST_API_VERSION)
#}
"""
def class_extra_for(algo):
if algo == "deepwater":
return """
# Ask the H2O server whether a Deep Water model can be built (depends on availability of native backends)
#' Returns True if a deep water model can be built, or False otherwise.
#' @param h2oRestApiVersion (Optional) Specific version of the REST API to use
#'
h2o.deepwater.available <- function(h2oRestApiVersion = .h2o.__REST_API_VERSION) {
visibility = .h2o.__remoteSend(method = "GET", h2oRestApiVersion = h2oRestApiVersion, .h2o.__MODEL_BUILDERS("deepwater"))$model_builders[["deepwater"]][["visibility"]]
if (visibility == "Experimental") {
print("Cannot build a Deep Water model - no backend found.")
return(FALSE)
} else {
return(TRUE)
}
}
"""
# ----------------------------------------------------------------------------------------------------------------------
# MAIN:
# ----------------------------------------------------------------------------------------------------------------------
def main():
bi.init("R", "../../../h2o-r/h2o-package/R", clear_dir=False)
for name, mb in bi.model_builders().items():
module = name
if name == "drf": module = "random_forest"
if name == "naivebayes": module = "naive_bayes"
bi.vprint("Generating model: " + name)
if name == "deepwater" or name == "deeplearning":
print("Generating model:" + module)
bi.write_to_file("%s.R" % module, gen_module(mb, name))
if __name__ == "__main__":
main()
| apache-2.0 | -1,986,338,802,887,520,300 | 50.726457 | 460 | 0.539358 | false |
Vutshi/qutip | examples/cavity-qubit-correlation-3.py | 1 | 1743 | #
# Example: Calculate the correlation function for a cavity that is coupled to
# a qubit.
#
from qutip import *
from pylab import *
import time
import warnings
warnings.simplefilter("error", np.ComplexWarning)
def calc_correlation(E, kappa, gamma, g, wc, w0, wl, N, tlist):
#
# returns the two-time correlation of the intracavity field as exponential
# series for the problem of a coherently driven cavity with a two-level atom
#
# E = amplitude of driving field, kappa = mirror coupling,
# gamma = spontaneous emission rate, g = atom-field coupling,
# wc = cavity frequency, w0 = atomic frequency, wl = driving field frequency,
# N = size of Hilbert space for intracavity field (zero to N-1 photons)
#
# Define cavity field and atomic operators
a = tensor(destroy(N),qeye(2))
sm = tensor(qeye(N),sigmam())
# Hamiltonian
H = (w0-wl)*sm.dag()*sm + (wc-wl)*a.dag()*a + 1j*g*(a.dag()*sm - sm.dag()*a) + E*(a.dag()+a)
# collapse operators
C1=sqrt(2*kappa)*a
C2=sqrt(gamma)*sm.dag()
A = a
corr_ode = correlation_ss(H, tlist, [C1, C2], A.dag(), A, solver="me")
corr_es = correlation_ss(H, tlist, [C1, C2], A.dag(), A, solver="es")
print "real corr at 0 [ode]:", corr_ode[0]
print "real corr at 0 [es] :", corr_es[0]
return corr_ode, corr_es
#
#
#
kappa = 2
gamma = 0.2
g = 5
wc = 0
w0 = 0
wl = 0
N = 5
E = 0.5
tlist = linspace(0,10.0,500)
start_time=time.time()
corr1, corr2 = calc_correlation(E, kappa, gamma, g, wc, w0, wl, N, tlist)
print 'time elapsed (probcorr) = ' +str(time.time()-start_time)
figure(1)
plot(tlist,real(corr1), tlist, real(corr2))
xlabel('Time')
ylabel('Correlation <a^\dag(t)a(0)>')
legend(("ode", "es"))
show()
| gpl-3.0 | -8,360,645,354,519,356,000 | 25.014925 | 96 | 0.634538 | false |
RobertoPrevato/PythonLab | spawning/cytlistmethod/opti_instantiating.py | 1 | 2531 | """
Tests the performance of instantiating SpawnItem implemented inheriting from lists and using
for loop optimized in Cython.
https://github.com/RobertoPrevato/PythonLab
Copyright 2017, Roberto Prevato
https://robertoprevato.github.io
Licensed under the MIT license:
http://www.opensource.org/licenses/MIT
"""
from cytlistmethod import instantiating
if __name__ == "__main__":
import cProfile
cProfile.run("instantiating.create_instances()", sort="time")
"""
Examples:
------------------------------------------------------------------------------------------------------------------------
1000004 function calls in 1.795 seconds
Ordered by: internal time
ncalls tottime percall cumtime percall filename:lineno(function)
1000000 1.115 0.000 1.115 0.000 spawnitem.py:21(__init__)
1 0.680 0.680 1.795 1.795 {cytlistmethod.instantiating.create_instances}
1 0.000 0.000 1.795 1.795 {built-in method builtins.exec}
1 0.000 0.000 1.795 1.795 <string>:1(<module>)
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
------------------------------------------------------------------------------------------------------------------------
1000004 function calls in 1.814 seconds
Ordered by: internal time
ncalls tottime percall cumtime percall filename:lineno(function)
1000000 1.144 0.000 1.144 0.000 spawnitem.py:21(__init__)
1 0.670 0.670 1.814 1.814 {cytlistmethod.instantiating.create_instances}
1 0.000 0.000 1.814 1.814 {built-in method builtins.exec}
1 0.000 0.000 1.814 1.814 <string>:1(<module>)
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
------------------------------------------------------------------------------------------------------------------------
1000004 function calls in 1.792 seconds
Ordered by: internal time
ncalls tottime percall cumtime percall filename:lineno(function)
1000000 1.122 0.000 1.122 0.000 spawnitem.py:21(__init__)
1 0.670 0.670 1.792 1.792 {cytlistmethod.instantiating.create_instances}
1 0.000 0.000 1.792 1.792 {built-in method builtins.exec}
1 0.000 0.000 1.792 1.792 <string>:1(<module>)
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
""" | mit | 682,365,911,667,337,700 | 45.036364 | 120 | 0.540103 | false |
hufsm/tu_gen2_libsigrokdecode | decoders/ps2/pd.py | 1 | 4017 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2016 Daniel Schulte <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
from collections import namedtuple
class Ann:
BIT, START, STOP, PARITY_OK, PARITY_ERR, DATA, WORD = range(7)
Bit = namedtuple('Bit', 'val ss es')
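# PS/2 device-to-host frames are sampled on the falling clock edge and are 11
# bits long: a start bit (always 0), eight data bits sent LSB first, an odd
# parity bit covering the data bits, and a stop bit (always 1). handle_bits()
# below accumulates these 11 bits before emitting the annotations.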
class Decoder(srd.Decoder):
api_version = 3
id = 'ps2'
name = 'PS/2'
longname = 'PS/2'
desc = 'PS/2 keyboard/mouse interface.'
license = 'gplv2+'
inputs = ['logic']
outputs = ['ps2']
channels = (
{'id': 'clk', 'name': 'Clock', 'desc': 'Clock line'},
{'id': 'data', 'name': 'Data', 'desc': 'Data line'},
)
annotations = (
('bit', 'Bit'),
('start-bit', 'Start bit'),
('stop-bit', 'Stop bit'),
('parity-ok', 'Parity OK bit'),
('parity-err', 'Parity error bit'),
('data-bit', 'Data bit'),
('word', 'Word'),
)
annotation_rows = (
('bits', 'Bits', (0,)),
('fields', 'Fields', (1, 2, 3, 4, 5, 6)),
)
def __init__(self):
self.reset()
def reset(self):
self.bits = []
self.samplenum = 0
self.bitcount = 0
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def putb(self, bit, ann_idx):
b = self.bits[bit]
self.put(b.ss, b.es, self.out_ann, [ann_idx, [str(b.val)]])
def putx(self, bit, ann):
self.put(self.bits[bit].ss, self.bits[bit].es, self.out_ann, ann)
def handle_bits(self, datapin):
# Ignore non start condition bits (useful during keyboard init).
if self.bitcount == 0 and datapin == 1:
return
# Store individual bits and their start/end samplenumbers.
self.bits.append(Bit(datapin, self.samplenum, self.samplenum))
# Fix up end sample numbers of the bits.
if self.bitcount > 0:
b = self.bits[self.bitcount - 1]
self.bits[self.bitcount - 1] = Bit(b.val, b.ss, self.samplenum)
if self.bitcount == 11:
self.bitwidth = self.bits[1].es - self.bits[2].es
b = self.bits[-1]
self.bits[-1] = Bit(b.val, b.ss, b.es + self.bitwidth)
# Find all 11 bits. Start + 8 data + odd parity + stop.
if self.bitcount < 11:
self.bitcount += 1
return
# Extract data word.
word = 0
for i in range(8):
word |= (self.bits[i + 1].val << i)
# Calculate parity.
parity_ok = (bin(word).count('1') + self.bits[9].val) % 2 == 1
# Emit annotations.
for i in range(11):
self.putb(i, Ann.BIT)
self.putx(0, [Ann.START, ['Start bit', 'Start', 'S']])
self.put(self.bits[1].ss, self.bits[8].es, self.out_ann, [Ann.WORD,
['Data: %02x' % word, 'D: %02x' % word, '%02x' % word]])
if parity_ok:
self.putx(9, [Ann.PARITY_OK, ['Parity OK', 'Par OK', 'P']])
else:
self.putx(9, [Ann.PARITY_ERR, ['Parity error', 'Par err', 'PE']])
self.putx(10, [Ann.STOP, ['Stop bit', 'Stop', 'St', 'T']])
self.bits, self.bitcount = [], 0
def decode(self):
while True:
# Sample data bits on falling clock edge.
clock_pin, data_pin = self.wait({0: 'f'})
self.handle_bits(data_pin)
| gpl-3.0 | 7,013,878,939,315,223,000 | 32.198347 | 77 | 0.560368 | false |
danielballan/vistools | setup.py | 1 | 1761 | from __future__ import (absolute_import, division, print_function)
import sys
import warnings
try:
from setuptools import setup
except ImportError:
try:
from setuptools.core import setup
except ImportError:
from distutils.core import setup
from distutils.core import Extension
import numpy
MAJOR = 0
MINOR = 0
MICRO = 0
ISRELEASED = False
SNAPSHOT = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
FULLVERSION = VERSION
print(FULLVERSION)
if not ISRELEASED:
import subprocess
FULLVERSION += '.dev'
if SNAPSHOT:
pipe = None
for cmd in ['git', 'git.cmd']:
try:
pipe = subprocess.Popen([cmd, "describe", "--always",
"--match", "v[0-9\/]*"],
stdout=subprocess.PIPE)
(so, serr) = pipe.communicate()
print(so, serr)
if pipe.returncode == 0:
pass
print('here')
except:
pass
if pipe is None or pipe.returncode != 0:
warnings.warn("WARNING: Couldn't get git revision, "
"using generic version string")
else:
rev = so.strip()
# makes distutils blow up on Python 2.7
if sys.version_info[0] >= 3:
rev = rev.decode('ascii')
# use result of git describe as version string
FULLVERSION = VERSION + '-' + rev.lstrip('v')
break
else:
FULLVERSION += QUALIFIER
setup(
name='vistools',
version=FULLVERSION,
author='Brookhaven National Lab',
packages=['vistools'],
)
| bsd-3-clause | 2,885,555,588,982,058,000 | 26.092308 | 69 | 0.523566 | false |
ronin-gw/PyMaSC | PyMaSC/utils/progress.py | 1 | 7404 | import sys
import array
import fcntl
from termios import TIOCGWINSZ
class ProgressBase(object):
global_switch = False
@classmethod
def _pass(self, *args, **kwargs):
pass
class ProgressBar(ProgressBase):
def __init__(self, body="<1II1>" * 12, prefix='>', suffix='<', output=sys.stderr):
self.body = body
self.fmt = "\r" + prefix + "{:<" + str(len(body)) + "}" + suffix
self.output = output
if self.global_switch:
self.enable_bar()
else:
self.disable_bar()
def enable_bar(self):
if self.global_switch:
self.format = self._format
self.clean = self._clean
self.update = self._update
def disable_bar(self):
self.format = self.clean = self.update = self._pass
def _format(self, s):
self.output.write(self.fmt.format(s))
def _clean(self):
self.output.write("\r\033[K")
self.output.flush()
def set(self, name, maxval):
self._unit = float(maxval) / len(self.body)
self.pos = 0
self._next_update = self._unit
self.format('')
def _update(self, val):
if val > self._next_update:
while val > self._next_update:
self.pos += 1
self._next_update += self._unit
self.format(self.body[:self.pos])
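# Minimal usage sketch for ProgressBar (assumes stderr is a TTY and that bars
# have been enabled globally; values are illustrative):
#   ProgressBase.global_switch = True
#   bar = ProgressBar()
#   bar.set("chr1", maxval=10000)
#   for pos in range(10000):
#       bar.update(pos)
#   bar.clean()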
class ProgressHook(ProgressBar):
def __init__(self, queue, body="<1II1>" * 12, prefix='>', suffix='<'):
super(ProgressHook, self).__init__(body, prefix, suffix, queue)
self.name = None
def _clean(self):
pass
def set(self, name, maxval):
self.name = name
super(ProgressHook, self).set(name, maxval)
def _format(self, s):
self.output.put((
None, (self.name, self.fmt.format(s))
))
class MultiLineProgressManager(ProgressBase):
def __init__(self, output=sys.stderr):
#
self.output = output
if not self.output.isatty():
self.global_switch = False
#
if not self.global_switch:
self.erase = self.clean = self.update = self._pass
return None
# get terminal size
buf = array.array('H', ([0] * 4))
stdfileno = self.output.fileno()
fcntl.ioctl(stdfileno, TIOCGWINSZ, buf, 1)
self.max_height, self.max_width = buf[:2]
self.max_width -= 1
#
self.key2lineno = {}
self.lineno2key = {}
self.key2body = {}
self.nlines = 0
def _cursor_n(self, n, ctrl):
if n < 1:
return None
self._write("\033[{}{}".format(n, ctrl))
def _down(self, n):
return self._cursor_n(n, ctrl="E")
def _up(self, n):
return self._cursor_n(n, ctrl="F")
def _reset_line(self):
self._write("\033[K")
def _write(self, l):
self.output.write(l[:self.max_width])
self.output.flush()
def _refresh_lines(self, from_, to):
for i in range(from_, to):
k = self.lineno2key[i]
self._reset_line()
self._write("{} {}".format(self.key2body[k], k))
if i < self.nlines:
self._write('\n')
def update(self, key, body):
if key not in self.key2lineno:
self.nlines += 1
lineno = self.key2lineno[key] = self.nlines
self.lineno2key[lineno] = key
else:
lineno = self.key2lineno[key]
self.key2body[key] = body
self._refresh_lines(1, self.nlines + 1)
self._up(self.nlines - 1)
if self.nlines == 1:
self._write("\033[G")
def erase(self, key):
try:
lineno = self.key2lineno[key]
except KeyError:
return None
self._refresh_lines(1, lineno)
if lineno == self.nlines:
self._reset_line()
for i in range(lineno + 1, self.nlines + 1):
k = self.lineno2key[i - 1] = self.lineno2key[i]
self.key2lineno[k] -= 1
self._write("{} {}".format(self.key2body[k], k))
self._write('\n')
self.nlines -= 1
self._reset_line()
self._up(self.nlines)
if self.nlines == 1:
self._write("\033[G")
del self.key2lineno[key], self.key2body[key]
def clean(self):
self._reset_line()
for i in range(self.nlines - 1):
self._down(1)
self._reset_line()
for i in range(self.nlines - 1):
self._up(1)
class ReadCountProgressBar(ProgressBar, MultiLineProgressManager):
def __init__(self, g_body="^@@@@@@@@@" * 10, g_prefix='', g_suffix='^',
c_body="<1II1>" * 12, c_prefix='>', c_suffix='< {}', output=sys.stderr):
MultiLineProgressManager.__init__(self, output)
if not self.global_switch:
self.set_chrom = self.set_genome = self.update = self.finish = self._pass
return None
self.genome_body = g_body
self.genome_fmt = g_prefix + "{:<" + str(len(g_body)) + "}" + g_suffix
self.chrom_body = c_body
self.chrom_fmt = c_prefix + "{:<" + str(len(c_body)) + "}" + c_suffix
if self.global_switch:
self.enable_bar()
else:
self.disable_bar()
self.output = output
self._genome_offset = None
def enable_bar(self):
if self.global_switch:
self.set_chrom = self._set_chrom
self.set_genome = self._set_genome
self.finish = self._finish
self.update = self._update
def disable_bar(self):
self.set_chrom = self.set_genome = self.finish = self.update = self._pass
def _set_chrom(self, maxval, name):
if self._genome_offset is None:
self._genome_offset = 0
else:
self._genome_offset += self._chrom_maxval
self._chrom = name
self._chrom_maxval = maxval
self._chrom_unit = float(maxval) / len(self.chrom_body)
self.chrom_pos = 0
self._chrom_next_update = self._chrom_unit
self._reset_line()
self._write(self.chrom_fmt.format('', self._chrom))
self._write('\n')
self._reset_line()
self._write(self.genome_fmt.format(self.genome_body[:self.genome_pos]))
self._up(1)
def _set_genome(self, maxval):
self._genome_unit = float(maxval) / len(self.genome_body)
self.genome_pos = 0
self._genome_next_update = self._genome_unit
def _update(self, val):
if val > self._chrom_next_update:
while val > self._chrom_next_update:
self.chrom_pos += 1
self._chrom_next_update += self._chrom_unit
self._write(self.chrom_fmt.format(self.chrom_body[:self.chrom_pos], self._chrom))
self._write('\n')
if val + self._genome_offset > self._genome_next_update:
while val + self._genome_offset > self._genome_next_update:
self.genome_pos += 1
self._genome_next_update += self._genome_unit
self._write(self.genome_fmt.format(self.genome_body[:self.genome_pos]))
self._up(1)
self._write("\033[G")
def _finish(self):
self._down(1)
self._reset_line()
self._up(1)
self._reset_line()
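# Minimal usage sketch for ReadCountProgressBar (assumes stderr is a TTY;
# sizes are illustrative; set_genome() must be called before the first
# set_chrom()):
#   ProgressBase.global_switch = True
#   bar = ReadCountProgressBar()
#   bar.set_genome(3000)
#   bar.set_chrom(1000, "chr1")
#   for pos in range(1000):
#       bar.update(pos)
#   bar.finish()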
| mit | -6,938,976,517,183,165,000 | 28.498008 | 93 | 0.53201 | false |
linkhub-sdk/popbill.taxinvoice.example.py | sendEmail.py | 1 | 1538 | # -*- coding: utf-8 -*-
# Code for console encoding differences. Don't mind it.
import sys
import imp
imp.reload(sys)
try: sys.setdefaultencoding('UTF8')
except Exception as E: pass
import testValue
from popbill import TaxinvoiceService, PopbillException
taxinvoiceService = TaxinvoiceService(testValue.LinkID, testValue.SecretKey)
taxinvoiceService.IsTest = testValue.IsTest
taxinvoiceService.IPRestrictOnOff = testValue.IPRestrictOnOff
taxinvoiceService.UseStaticIP = testValue.UseStaticIP
taxinvoiceService.UseLocalTimeYN = testValue.UseLocalTimeYN
'''
Resends the tax invoice issuance notification email.
- https://docs.popbill.com/taxinvoice/python/api#SendEmail
'''
try:
print("=" * 15 + " 발행안내메일 재전송 " + "=" * 15)
# 팝빌회원 사업자번호
CorpNum = testValue.testCorpNum
    # Tax invoice issue type - SELL: sales, BUY: purchase, TRUSTEE: trustee-issued
MgtKeyType = "SELL"
    # Document number (management key)
MgtKey = "20210429-001"
    # Recipient email address
    # Note: the notification email is sent even when testing in the Popbill
    # development environment, so do not enter a real customer's email address.
ReceiverMail = "[email protected]"
    # Popbill member ID
UserID = testValue.testUserID
result = taxinvoiceService.sendEmail(CorpNum, MgtKeyType, MgtKey, ReceiverMail, UserID)
print("처리결과 : [%d] %s" % (result.code,result.message))
except PopbillException as PE:
print("Popbill Exception : [%d] %s" % (PE.code , PE.message))
| mit | -3,307,818,872,383,219,000 | 26.041667 | 91 | 0.714946 | false |
wavesoft/fairroot-benchmark | benchmark/runner.py | 1 | 8118 |
import json
import copy
import time
import signal
from benchmark.monitor import TestMonitor
from benchmark.launcher import TestLauncher
def _recursive_macro_replace(val, macros):
"""
Iterate over the items of val and replace the macros with
the properties of macros dictionary
"""
# Iterate over dicts
if isinstance(val, dict):
for k,v in val.iteritems():
val[k] = _recursive_macro_replace(v, macros)
return val
# Iterate over lists
elif isinstance(val, list):
for i in range(0, len(val)):
val[i] = _recursive_macro_replace(val[i], macros)
return val
# Replace only strings
elif (type(val) is str) or (type(val) is unicode):
if '%' in val:
return val % macros
return val
# Everything else passes through
else:
return val
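# Illustrative call (hypothetical macros): every "%(name)s" placeholder found
# in the nested structure is substituted in place and the structure returned.
#   conf = {"address": "tcp://%(remote_ip)s:5555", "ids": ["%(local_name)s"]}
#   _recursive_macro_replace(conf, {"remote_ip": "10.0.0.2", "local_name": "head"})
#   # -> {"address": "tcp://10.0.0.2:5555", "ids": ["head"]}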
class TestRunner:
def __init__(self, test_case):
"""
Initialize the runner for the specified test case
"""
# Reset properties
self.lastError = ""
self.testCase = test_case
# Prepare launcher for monitor machine
self.m_monitor = self.prepareLauncherConfig(
test_case.local, test_case.remote, test_case.values
)
# Prepare launcher for assistant machines
self.m_assist = []
for m in test_case.remote:
self.m_assist.append(
self.prepareLauncherConfig(
m, [test_case.local], test_case.values
)
)
def prepareLauncherConfig(self, machine, remote_machines, values):
"""
Prepare launcher configuration for the specified machine and test-case
"""
# Collect information from various sources to build known macros
km = machine.app['env'].copy()
km.update( values )
if 'globals' in self.testCase.config:
km.update( self.testCase.config['globals'] )
# Define local info
km['local_ip'] = machine.ip
km['local_name'] = machine.name
# Define remote info
for i in range(0, len(remote_machines)):
            # Unprefixed keys refer to the last remote endpoint (EP)
km['remote_ip'] = remote_machines[i].ip
km['remote_name'] = remote_machines[i].name
# But also include a list of EPs
km['remote_%i_ip' % i] = remote_machines[i].ip
km['remote_%i_name' % i] = remote_machines[i].name
        # Calculate some derivatives (allow queue-size pending messages on the queue)
mul = 1
if 'queue-size' in self.testCase.config['globals']:
mul = int(self.testCase.config['globals']['queue-size'])
km['rxtx_size_plus'] = int(km['rxtx_size']) * mul
km['rxtx_size_minus'] = int(km['rxtx_size']) / mul
############################
# Compile environment
############################
# Start with empty
l_env = {}
# Iterate over environment variables
for k,v in machine.app['env'].iteritems():
# Replace macros in the value
value = v
if '%' in value:
value = value % km
# Update env variable AND the known macros
l_env[k] = value
km[k] = value
############################
# Compile Configuration
############################
# Recursively replace macros
l_conf = copy.deepcopy( machine.app['config'] )
_recursive_macro_replace( l_conf, km )
############################
# Compile command-line
############################
# Just clone the base command-line
l_cmdline = list(machine.app['cmdline'])
# Prepend the executable to run (if specified)
if ('exec' in machine.app) and machine.app['exec']:
l_cmdline.insert(0, machine.app['exec'])
# Append configuration file flag
l_cmdline.append( machine.app['config_arg'] )
# Convert macros
l_cmdline = _recursive_macro_replace( l_cmdline, km )
print "DEBUG: Executing %s <config>" % " ".join(l_cmdline)
print "DEBUG: Config:\n%s" % json.dumps(l_conf, indent=4, separators=(',', ': '))
############################
# Compile bootstrap script
############################
# Prepare script
l_script = "\n".join([
"#!/bin/bash",
# Create a temporary were to keep the config
"CONF=/tmp/fairmq-benchmark.json",
# Write down the config
"cat <<EOF > $CONF",
json.dumps(l_conf, indent=4, separators=(',', ': ')),
"EOF",
# Prepare environment
"\n".join(map(lambda kv: "export %s=%s" % kv, l_env.iteritems())),
# Start memory monitor
#"function mem_mon {",
#" while true; do",
#" free",
#"free -m",
#"top -bn1 | grep load",
#" sleep 1",
#" done",
#"}",
#"mem_mon&",
"function mem_monitor {",
"local MONITOR_PID=0",
"local PS_DETAILS=0",
"local STATUS_DETAILS=0",
"while true; do",
"MONITOR_PID=$(pidof " + l_cmdline[0] + ")",
"PID_NAME=$(ps -p $MONITOR_PID -o comm=)",
"echo " + l_cmdline[0],
"echo $PID_NAME",
"[ -z \"$MONITOR_PID\" ] && continue",
"local PS_DETAILS=$(ps up $MONITOR_PID | tail -n1)",
"echo PS_INFO: $PS_DETAILS",
#Memory stats
"local STATUS_DETAILS=$(smem -P $PID_NAME | tail -n1)",
"echo STAT_INFO: $STATUS_DETAILS",
"sleep 1",
#Network stats
"local NET_STAT_RX=$(netstat -Ienp7s0f0 -e | awk 'NR==6{print $5}')",
"local NET_STAT_TX=$(netstat -Ienp7s0f0 -e | awk 'NR==8{print $5}')",
"echo \"NTSTAT_RX: $NET_STAT_RX\"",
"echo \"NTSTAT_TX: $NET_STAT_TX\"",
#CPU stats
"local CPU_PERC=$(ps -p $MONITOR_PID -o %cpu,%mem,cmd)",
"echo CPU: $CPU_PERC",
"done",
"}",
"mem_monitor&",
"PIDS=\"$!\"",
# Execute command-line
"stdbuf -i0 -o0 -e0 " + " ".join(l_cmdline) + " $CONF&",
"APPPID=$!",
"PIDS=\"$PIDS $APPPID\"",
# Register a SIGINT handler
"function cleanup {",
"echo \"Killing monitor and process $PIDS\" >&2",
"kill -SIGINT $PIDS",
"sleep 1",
"kill $PIDS",
"exit 1",
"}",
"trap cleanup SIGINT SIGHUP",
# Wait for main process to finish
"echo \"Waiting for main application ($APPPID) to exit\"",
"wait $APPPID",
"cleanup",
])
############################
# Prepare bootstrap command
############################
# Prepare command-line
l_bootstrap = ["bash"]
# In case of SSH, prefix with SSH
if 'ssh' in machine.config:
_ssh = machine.config['ssh']
# Calculate ssh info to prepend
ssh_cmdline = [ 'ssh', '-t' ]
# Check for identity file
if 'key' in _ssh:
ssh_cmdline.append( "-i" )
ssh_cmdline.append( _ssh['key'] )
# Get host
if 'host' in _ssh:
host = _ssh['host']
else:
host = machine.ip
# Get user
if 'user' in _ssh:
host = "%s@%s" % (_ssh['user'], host)
# Finalize cmdline
ssh_cmdline.append( host )
ssh_cmdline.append( "--" )
# Prepend to l_bootstrap
ssh_cmdline.extend( l_bootstrap )
l_bootstrap = ssh_cmdline
# Return config
return (machine.name, l_bootstrap, l_script)
def run(self):
"""
        Start the test and return the results, or None if an error occurred
"""
print "--[ %s ]-----" % self.testCase.name
# Create a test monitor
monitor = TestMonitor( self.testCase )
# Create launchers
launchers = [ TestLauncher( *self.m_monitor, monitor=monitor ) ]
for m in self.m_assist:
launchers.append( TestLauncher( *m ) )
# Start launchers
monitor.start()
for l in launchers:
print "INFO: Starting app on %s" % l.name
l.start()
time.sleep(0.5)
# Wait for head process to exit
print "INFO: Waiting head worker to complete"
launchers[0].join()
print "INFO: Head worker completed"
# Wait 5 seconds for other threads to exit
hasAlive = True
timeout = time.time() + 5
while (time.time() < timeout) and hasAlive:
hasAlive = False
for i in range(1,len(launchers)):
if launchers[i].poll() is None:
hasAlive = True
time.sleep(0.5)
# Kill incomplete threads
if hasAlive:
print "INFO: Forcefully stopping remaining workers"
for i in range(1,len(launchers)):
if launchers[i].poll() is None:
launchers[i].interrupt()
# Join all threads
print "INFO: Joining remaining workers"
for i in range(1,len(launchers)):
if launchers[i].poll() is None:
launchers[i].join()
# Collect monitor results
monitor.close()
return monitor.metrics()
| mit | -1,450,669,214,250,994,200 | 24.528302 | 83 | 0.591155 | false |
SphinxKnight/kuma | kuma/core/tests/test_templates.py | 1 | 2973 | from __future__ import unicode_literals
import os
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.template.backends.jinja2 import Jinja2
from django.template.loader import render_to_string
from django.test import RequestFactory
from django.utils import translation
from pyquery import PyQuery as pq
from . import KumaTestCase
class MockRequestTests(KumaTestCase):
"""Base class for tests that need a mock request"""
rf = RequestFactory()
def setUp(self):
super(MockRequestTests, self).setUp()
self.user = AnonymousUser()
self.request = self.rf.get('/')
self.request.user = self.user
self.request.LANGUAGE_CODE = 'en-US'
class BaseTemplateTests(MockRequestTests):
"""Tests for base.html"""
def setUp(self):
super(BaseTemplateTests, self).setUp()
self.template = 'base.html'
def test_no_dir_attribute(self):
html = render_to_string(self.template, request=self.request)
doc = pq(html)
dir_attr = doc('html').attr['dir']
assert 'ltr' == dir_attr
def test_rtl_dir_attribute(self):
translation.activate('ar')
html = render_to_string(self.template, request=self.request)
doc = pq(html)
dir_attr = doc('html').attr['dir']
assert 'rtl' == dir_attr
def test_lang_switcher(self):
translation.activate("bn-BD")
html = render_to_string(self.template, request=self.request)
doc = pq(html)
# Check default locale is in the first choice field
first_field = doc("#language.autosubmit option")[0].text_content()
assert settings.LANGUAGE_CODE in first_field
class ErrorListTests(MockRequestTests):
"""Tests for errorlist.html, which renders form validation errors."""
def setUp(self):
super(ErrorListTests, self).setUp()
params = {
'DIRS': [os.path.join(settings.ROOT, 'jinja2')],
'APP_DIRS': True,
'NAME': 'jinja2',
'OPTIONS': {},
}
self.engine = Jinja2(params)
def test_escaping(self):
"""Make sure we escape HTML entities, lest we court XSS errors."""
class MockForm(object):
errors = True
auto_id = 'id_'
def __iter__(self):
return iter(self.visible_fields())
def visible_fields(self):
return [{'errors': ['<"evil&ness-field">']}]
def non_field_errors(self):
return ['<"evil&ness-non-field">']
source = ("""{% from "includes/error_list.html" import errorlist %}"""
"""{{ errorlist(form) }}""")
context = {'form': MockForm()}
html = self.engine.from_string(source).render(context)
assert '<"evil&ness' not in html
assert '<"evil&ness-field">' in html
assert '<"evil&ness-non-field">' in html
| mpl-2.0 | 7,533,660,249,702,623,000 | 31.315217 | 78 | 0.608476 | false |
okfn-brasil/gastos_abertos | manage.py | 1 | 2907 | # -*- coding: utf-8 -*-
from flask.ext.script import Manager, Shell
from gastosabertos.extensions import db
from gastosabertos import create_app
manager = Manager(create_app)
manager.add_option('-i', '--inst', dest='instance_folder', required=False)
manager.add_command('shell', Shell(make_context=lambda: {
'app': manager.app,
'db': db,
}))
@manager.command
@manager.option('-h', '--host', help='Host')
def run(host='127.0.0.1'):
"""Run in local machine."""
manager.app.run(host=host)
@manager.command
def test():
"""Run tests."""
return
@manager.command
def initdb():
"""Init or reset database"""
db.drop_all()
db.create_all()
def _importrevenue():
"""Import revenue data to the database"""
from utils.import_revenue_codes import import_codes
from utils import import_revenue
# Revenue
import_codes(db)
import_revenue.insert_all(db, csv_file='data/receitas_min.csv')
# insert_all(db, csv_file='data/receitas_min.csv', lines_per_insert=80)
def _importcontratos():
"""Import contratos data to the database"""
from utils import import_contrato, import_contrato_urls
# Contratos
import_contrato.insert_all(db, csv_file='data/contratos-2014.xls')
import_contrato_urls.insert_all(db, csv_file='data/urls.csv')
def _importexecucao():
"""Import execucao data to the database"""
from utils import (import_execucao, geocode_execucao,
update_execucao_year_info)
# Execucao
folder = '../gastos_abertos_dados/Orcamento/execucao/'
import_execucao.insert_all(db, path=folder)
data_folder = 'utils/geocoder/data'
terms_folder = 'utils/geocoder/terms'
geocode_execucao.geocode_all(db, data_folder, terms_folder)
update_execucao_year_info.update_all_years_info(db)
@manager.command
@manager.option('-d', '--data', help='Data type to be imported')
@manager.option('-r', '--reset', help='Remove previous data from database before importing')
def importdata(data='all', reset=False):
"""Import the data to the database"""
data = data.lower()
if reset:
initdb()
if data in ('all', 'revenue'):
_importrevenue()
if data in ('all', 'contratos'):
_importcontratos()
if data in ('all', 'execucao'):
_importexecucao()
@manager.command
@manager.option('-r', '--resource', help='Resource to be indexed')
def build_search_index(resource='all'):
"""Build search index"""
from utils import build_search_index
resource = resource.lower()
if resource in ('all', 'contratos'):
build_search_index.build_contratos_index()
@manager.command
def download_contratos():
"""Download Contratos files"""
from utils import build_search_index
build_search_index.download_contratos_files(csv_file='data/urls.csv', directory='data/contratos')
if __name__ == "__main__":
manager.run()
| agpl-3.0 | 7,894,687,137,202,703,000 | 25.427273 | 101 | 0.665291 | false |
pragya1990/pox_whole_code | pox/boot.py | 1 | 15054 | #!/bin/sh -
# Copyright 2011-2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
# If you have PyPy 1.6+ in a directory called pypy alongside pox.py, we
# use it.
# Otherwise, we try to use a Python interpreter called python2.7, which
# is a good idea if you're using Python from MacPorts, for example.
# We fall back to just "python" and hope that works.
#TODO: Make runnable by itself (paths need adjusting, etc.).
''''true
export OPT="-u -O"
export FLG=""
if [ "$(basename $0)" = "debug-pox.py" ]; then
export OPT=""
export FLG="--debug"
fi
if [ -x pypy/bin/pypy ]; then
exec pypy/bin/pypy $OPT "$0" $FLG "$@"
fi
if type python2.7 > /dev/null; then
exec python2.7 $OPT "$0" $FLG "$@"
fi
exec python $OPT "$0" $FLG "$@"
'''
from __future__ import print_function
import logging
import logging.config
import os
import sys
import traceback
import time
from pox.core import core
import pox.openflow
import pox.openflow.of_01
from pox.lib.util import str_to_bool
# Function to run on main thread
_main_thread_function = None
try:
import __pypy__
except ImportError:
__pypy__ = None
def _do_import (name):
"""
Try to import the named component.
Returns its module name if it was loaded or False on failure.
"""
def show_fail ():
traceback.print_exc()
print("Could not import module:", name)
def do_import2 (base_name, names_to_try):
if len(names_to_try) == 0:
print("Module not found:", base_name)
return False
name = names_to_try.pop(0)
if name in sys.modules:
return name
try:
__import__(name, globals(), locals())
return name
except ImportError:
# There are two cases why this might happen:
# 1. The named module could not be found
# 2. Some dependent module (import foo) or some dependent
# name-in-a-module (e.g., from foo import bar) could not be found.
# If it's the former, we might try a name variation (e.g., without
# a leading "pox."), but if we ultimately can't find the named
# module, we just say something along those lines and stop.
# On the other hand, if the problem is with a dependency, we should
# print a stack trace so that it can be fixed.
# Sorting out the two cases is an ugly hack.
s = sys.exc_info()[1].message.rsplit(" ", 1)
# Sadly, PyPy isn't consistent with CPython here.
if s[0] == "No module named" and (name.endswith(s[1]) or __pypy__):
# It was the one we tried to import itself. (Case 1)
# If we have other names to try, try them!
return do_import2(base_name, names_to_try)
elif (sys.exc_info()[1].message
== "Import by filename is not supported."):
print(sys.exc_info()[1].message)
import os.path
n = name.replace("/", ".").replace("\\", ".")
n = n.replace( os.path.sep, ".")
if n.startswith("pox.") or n.startswith("ext."):
n = n[4:]
print("Maybe you meant to run '%s'?" % (n,))
return False
else:
# This means we found the module we were looking for, but one
# of its dependencies was missing.
show_fail()
return False
except:
# There was some other sort of exception while trying to load the
# module. Just print a trace and call it a day.
show_fail()
return False
return do_import2(name, ["pox." + name, name])
def _do_launch (argv):
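  # Parses the remaining command line into POX options and components.
  # Arguments that don't start with "-" name components (optionally as
  # "module:launch_function"); "--key=value" arguments that follow a
  # component are collected as that component's parameters, while leading
  # ones (before the first component) become global POX options.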
component_order = []
components = {}
curargs = {}
pox_options = curargs
for arg in argv:
if not arg.startswith("-"):
if arg not in components:
components[arg] = []
curargs = {}
components[arg].append(curargs)
component_order.append(arg)
else:
arg = arg.lstrip("-").split("=", 1)
arg[0] = arg[0].replace("-", "_")
if len(arg) == 1: arg.append(True)
curargs[arg[0]] = arg[1]
_options.process_options(pox_options)
_pre_startup()
inst = {}
for name in component_order:
cname = name
inst[name] = inst.get(name, -1) + 1
params = components[name][inst[name]]
name = name.split(":", 1)
launch = name[1] if len(name) == 2 else "launch"
name = name[0]
r = _do_import(name)
if r is False: return False
name = r
#print(">>",name)
if launch in sys.modules[name].__dict__:
f = sys.modules[name].__dict__[launch]
if f.__class__ is not _do_launch.__class__:
print(launch, "in", name, "isn't a function!")
return False
multi = False
if f.func_code.co_argcount > 0:
if (f.func_code.co_varnames[f.func_code.co_argcount-1]
== '__INSTANCE__'):
# It's a multi-instance-aware component.
multi = True
          # Special __INSTANCE__ parameter gets passed a tuple with:
# 1. The number of this instance (0...n-1)
# 2. The total number of instances for this module
# 3. True if this is the last instance, False otherwise
# The last is just a comparison between #1 and #2, but it's
# convenient.
params['__INSTANCE__'] = (inst[cname], len(components[cname]),
inst[cname] + 1 == len(components[cname]))
if multi == False and len(components[cname]) != 1:
print(name, "does not accept multiple instances")
return False
try:
f(**params)
except TypeError as exc:
instText = ''
if inst[cname] > 0:
instText = "instance {0} of ".format(inst[cname] + 1)
print("Error executing {2}{0}.{1}:".format(name,launch,instText))
import inspect
if inspect.currentframe() is sys.exc_info()[2].tb_frame:
# Error is with calling the function
# Try to give some useful feedback
if _options.verbose:
traceback.print_exc()
else:
exc = sys.exc_info()[0:2]
print(''.join(traceback.format_exception_only(*exc)), end='')
print()
EMPTY = "<Unspecified>"
code = f.__code__
argcount = code.co_argcount
argnames = code.co_varnames[:argcount]
defaults = list((f.func_defaults) or [])
defaults = [EMPTY] * (argcount - len(defaults)) + defaults
args = {}
for n, a in enumerate(argnames):
args[a] = [EMPTY,EMPTY]
if n < len(defaults):
args[a][0] = defaults[n]
if a in params:
args[a][1] = params[a]
del params[a]
if '__INSTANCE__' in args:
del args['__INSTANCE__']
if f.__doc__ is not None:
print("Documentation for {0}:".format(name))
doc = f.__doc__.split("\n")
#TODO: only strip the same leading space as was on the first
# line
doc = map(str.strip, doc)
print('',("\n ".join(doc)).strip())
#print(params)
#print(args)
print("Parameters for {0}:".format(name))
if len(args) == 0:
print(" None.")
else:
print(" {0:25} {1:25} {2:25}".format("Name", "Default",
"Active"))
print(" {0:25} {0:25} {0:25}".format("-" * 15))
for k,v in args.iteritems():
print(" {0:25} {1:25} {2:25}".format(k,str(v[0]),
str(v[1] if v[1] is not EMPTY else v[0])))
if len(params):
print("This component does not have a parameter named "
+ "'{0}'.".format(params.keys()[0]))
return False
missing = [k for k,x in args.iteritems()
if x[1] is EMPTY and x[0] is EMPTY]
if len(missing):
print("You must specify a value for the '{0}'"
"parameter.".format(missing[0]))
return False
return False
else:
# Error is inside the function
raise
    elif len(params) > 0 or launch != "launch":
print("Module %s has no %s(), but it was specified or passed " \
"arguments" % (name, launch))
return False
return True
class Options (object):
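  # Base class for option containers: set() maps a "--some-option=value"
  # style name onto an attribute of the same (underscored) name, or onto a
  # _set_<name>() setter method if one exists, with automatic bool coercion
  # for attributes whose default value is a bool.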
def set (self, given_name, value):
name = given_name.replace("-", "_")
if name.startswith("_") or hasattr(Options, name):
# Hey, what's that about?
print("Illegal option:", given_name)
return False
has_field = hasattr(self, name)
has_setter = hasattr(self, "_set_" + name)
if has_field == False and has_setter == False:
print("Unknown option:", given_name)
return False
if has_setter:
setter = getattr(self, "_set_" + name)
setter(given_name, name, value)
else:
if isinstance(getattr(self, name), bool):
# Automatic bool-ization
value = str_to_bool(value)
setattr(self, name, value)
return True
def process_options (self, options):
for k,v in options.iteritems():
if self.set(k, v) is False:
# Bad option!
sys.exit(1)
_help_text = """
POX is a Software Defined Networking controller framework.
The commandline of POX is like:
pox.py [POX options] [C1 [C1 options]] [C2 [C2 options]] ...
Notable POX options include:
--verbose Print more debugging information (especially useful for
problems on startup)
--no-openflow Don't automatically load the OpenFlow module
--log-config=F Load a Python log configuration file (if you include the
option without specifying F, it defaults to logging.cfg)
C1, C2, etc. are component names (e.g., Python modules). Options they
support are up to the module. As an example, you can load a learning
switch app that listens on a non-standard port number by specifying an
option to the of_01 component, and loading the l2_learning component like:
./pox.py --verbose openflow.of_01 --port=6634 forwarding.l2_learning
""".strip()
class POXOptions (Options):
def __init__ (self):
# self.cli = True
self.verbose = False
self.enable_openflow = True
self.log_config = None
def _set_h (self, given_name, name, value):
self._set_help(given_name, name, value)
def _set_help (self, given_name, name, value):
print(_help_text)
#TODO: Summarize options, etc.
sys.exit(0)
def _set_version (self, given_name, name, value):
print(core._get_python_version())
sys.exit(0)
def _set_no_openflow (self, given_name, name, value):
self.enable_openflow = not str_to_bool(value)
# def _set_no_cli (self, given_name, name, value):
# self.cli = not str_to_bool(value)
def _set_log_config (self, given_name, name, value):
if value is True:
# I think I use a better method for finding the path elsewhere...
p = os.path.dirname(os.path.realpath(__file__))
value = os.path.join(p, "logging.cfg")
self.log_config = value
def _set_debug (self, given_name, name, value):
value = str_to_bool(value)
if value:
# Debug implies no openflow and no CLI and verbose
#TODO: Is this really an option we need/want?
self.verbose = True
self.enable_openflow = False
# self.cli = False
_options = POXOptions()
def _pre_startup ():
"""
This function is called after all the POX options have been read in
but before any components are loaded. This gives a chance to do
early setup (e.g., configure logging before a component has a chance
to try to log something!).
"""
_setup_logging()
if _options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
if _options.enable_openflow:
pox.openflow.launch() # Default OpenFlow launch
def _post_startup ():
if _options.enable_openflow:
pox.openflow.of_01.launch() # Usually, we launch of_01
def _setup_logging ():
# First do some basic log config...
# This is kind of a hack, but we need to keep track of the handler we
# install so that we can, for example, uninstall it later. This code
# originally lived in pox.core, so we explicitly reference it here.
pox.core._default_log_handler = logging.StreamHandler()
formatter = logging.Formatter(logging.BASIC_FORMAT)
pox.core._default_log_handler.setFormatter(formatter)
logging.getLogger().addHandler(pox.core._default_log_handler)
logging.getLogger().setLevel(logging.INFO)
# Now set up from config file if specified...
#TODO:
# I think we could move most of the special log stuff into
# the log module. You'd just have to make a point to put the log
# module first on the commandline if you wanted later component
# initializations to honor it. Or it could be special-cased?
if _options.log_config is not None:
if not os.path.exists(_options.log_config):
print("Could not find logging config file:", _options.log_config)
sys.exit(2)
logging.config.fileConfig(_options.log_config,
disable_existing_loggers=True)
def set_main_function (f):
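  # Registers the function that should run on the main thread once boot
  # completes (e.g., for components that must own the main thread). Only
  # one component may claim it; a second, different registration is refused.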
print("INSIDE MAIN THREAD")
print(str(_main_thread_function))
global _main_thread_function
if _main_thread_function == f: return True
if _main_thread_function is not None:
import logging
lg = logging.getLogger("boot")
lg.error("Could not set main thread function to: " + str(f))
lg.error("The main thread function is already "
+ "taken by: " + str(_main_thread_function))
return False
_main_thread_function = f
return True
def boot ():
"""
Start up POX.
"""
# Add pox directory to path
sys.path.append(os.path.abspath(os.path.join(sys.path[0], 'pox')))
sys.path.append(os.path.abspath(os.path.join(sys.path[0], 'ext')))
try:
argv = sys.argv[1:]
# Always load cli (first!)
#TODO: Can we just get rid of the normal options yet?
pre = []
while len(argv):
if argv[0].startswith("-"):
pre.append(argv.pop(0))
else:
break
argv = pre + "py --disable".split() + argv
if _do_launch(argv):
_post_startup()
core.goUp()
print("Inside LAunch")
else:
return
except SystemExit:
return
except:
traceback.print_exc()
return
if _main_thread_function:
print("Inside main_thread_function")
_main_thread_function()
else:
#core.acquire()
try:
while core.running:
time.sleep(10)
except:
pass
#core.scheduler._thread.join() # Sleazy
try:
pox.core.core.quit()
except:
pass
| gpl-3.0 | -8,790,270,044,505,829,000 | 29.722449 | 75 | 0.605885 | false |
lipari/slurm | doc/man/man2html.py | 1 | 7210 | #!/usr/bin/env python
import re
import sys
import os
include_pat = r'(<!--\s*#include\s*virtual\s*=\s*"([^"]+)"\s*-->)'
include_regex = re.compile(include_pat)
url_pat = r'(\s+href\s*=\s*")([^"#]+)(#[^"]+)?(")'
url_regex = re.compile(url_pat)
dirname = ''
# Insert tags for options
# Two styles are processed.
# <DT><B>pppppp</B><DD>
# has tag <a id="OPT_pppppp"></a>
# <DT><B>--pppppp</B> or <DT><B>-P</B>, <B>--pppppp</B>
# has tag <a id="OPT_pppppp"></a>
# <H2>hhhh</H2> also has tag <a id="SECTION_hhhh"></a> inserted
def insert_tag(html, lineIn):
if lineIn[0:4] == "<H2>":
posEnd = lineIn.find("</H2>")
if posEnd != -1:
html.write('<a id="SECTION_' + lineIn[4:posEnd] + '"></a>\n')
return
if lineIn[0:7] != "<DT><B>":
return
posBgn = lineIn.find("--")
if posBgn == -1:
# 1st form
posBgn = 5
posBgn = posBgn + 2
posEnd = lineIn.find("</B>",posBgn)
if posEnd == -1:
# poorly constructed
return
html.write('<a id="OPT_' + lineIn[posBgn:posEnd] + '"></a>\n')
return
def llnl_references(line):
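	# Replace absolute links to the online SLURM documentation
	# (www.schedmd.com/slurmdocs) emitted by man2html with relative links
	# to the locally generated HTML pages.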
manStr = "Refer to mc_support.html"
htmlStr = 'Refer to <a href="mc_support.html">mc_support</a>'
lineFix = line.replace(manStr,htmlStr)
if lineFix != line:
return lineFix
manStr = '<A HREF="http://www.schedmd.com/slurmdocs/mc_support.html">http://www.schedmd.com/slurmdocs/mc_support.html</A>'
htmlStr = 'the <a href="mc_support.html">mc_support</a> document'
lineFix = line.replace(manStr,htmlStr)
if lineFix != line:
return lineFix
manStr = '<A HREF="http://www.schedmd.com/slurmdocs/dist_plane.html.">http://www.schedmd.com/slurmdocs/dist_plane.html.</A>'
htmlStr = 'the <a href="dist_plane.html">dist_plane</a> document'
lineFix = line.replace(manStr,htmlStr)
if lineFix != line:
return lineFix
manStr = '<<A HREF="http://www.schedmd.com/slurmdocs/mpi_guide.html">http://www.schedmd.com/slurmdocs/mpi_guide.html</A>>'
htmlStr = '<a href="mpi_guide.html">mpi_guide</a>'
lineFix = line.replace(manStr,htmlStr)
if lineFix != line:
return lineFix
manStr = '(<A HREF="http://www.schedmd.com/slurmdocs/power_save.html).">http://www.schedmd.com/slurmdocs/power_save.html).</A>'
htmlStr = '<a href="power_save.html">power_save</a>'
lineFix = line.replace(manStr,htmlStr)
if lineFix != line:
return lineFix
manStr = '<A HREF="http://www.schedmd.com/slurmdocs/cons_res.html">http://www.schedmd.com/slurmdocs/cons_res.html</A>'
htmlStr = '<a href="cons_res.html">cons_res</a>'
lineFix = line.replace(manStr,htmlStr)
if lineFix != line:
return lineFix
manStr = '<A HREF="http://www.schedmd.com/slurmdocs/cons_res_share.html">http://www.schedmd.com/slurmdocs/cons_res_share.html</A>'
htmlStr = '<a href="cons_res_share.html">cons_res_share</a>'
lineFix = line.replace(manStr,htmlStr)
if lineFix != line:
return lineFix
manStr = '<A HREF="http://www.schedmd.com/slurmdocs/gang_scheduling.html">http://www.schedmd.com/slurmdocs/gang_scheduling.html</A>'
htmlStr = '<a href="gang_scheduling.html">gang_scheduling</a>'
lineFix = line.replace(manStr,htmlStr)
if lineFix != line:
return lineFix
manStr = '<A HREF="http://www.schedmd.com/slurmdocs/preempt.html">http://www.schedmd.com/slurmdocs/preempt.html</A>'
htmlStr = '<a href="preempt.html">preempt</a>'
lineFix = line.replace(manStr,htmlStr)
if lineFix != line:
return lineFix
return line
def relative_reference(lineIn):
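	# man2html emits cross references through its CGI interface
	# (/cgi-bin/man/man2html?...). Rewrite such hrefs as relative
	# "<page>.html" links; bare references to the converter itself become a
	# small credit line, and the "Return to Main Contents" link is dropped.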
fullRef = "/cgi-bin/man/man2html"
lenRef = len(fullRef)
refAnchor="<A HREF=";
lenRefAnchor = len(refAnchor)
lineOt = ""
cursor = 0
posHREF = lineIn.find(fullRef,cursor)
if posHREF == -1:
return lineIn
if lineIn[posHREF+lenRef] != "?":
pos = lineIn.find("Return to Main Contents",cursor)
if pos != -1:
return ""
return "<i>man2html</i> "
while posHREF != -1:
posRefAnchor = lineIn.find(refAnchor,cursor)
lineOt = lineOt + lineIn[cursor:posRefAnchor+lenRefAnchor]
cursor = posHREF + lenRef + 3
lineOt = lineOt + '"'
posQuote = lineIn.find('"',cursor)
lineOt = lineOt + lineIn[cursor:posQuote] + ".html"
cursor = posQuote
posHREF = lineIn.find(fullRef,cursor)
lineOt = lineOt + lineIn[cursor:]
return lineOt
def include_virtual(matchobj):
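	# Regex callback: inline the contents of the file referenced by an SSI
	# style <!--#include virtual="..."--> directive, if the file exists.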
global dirname
if dirname:
filename = dirname + '/' + matchobj.group(2)
else:
filename = matchobj.group(2)
if os.access(filename, os.F_OK):
#print 'Including file', filename
lines = file(filename, 'r').read()
return lines
else:
return matchobj.group(0)
def url_rewrite(matchobj):
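	# Regex callback: point a link at the generated .html page when the
	# referenced .shtml file exists locally (preserving any #fragment).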
global dirname
if dirname:
localpath = dirname + '/' + matchobj.group(2)
else:
localpath = matchobj.group(2)
if matchobj.group(2)[-6:] == '.shtml' and os.access(localpath, os.F_OK):
location = matchobj.group(2)
if matchobj.group(3) is None:
newname = location[:-6] + '.html'
else:
newname = location[:-6] + '.html' + matchobj.group(3)
#print 'Rewriting', location, 'to', newname
return matchobj.group(1) + newname + matchobj.group(4)
else:
return matchobj.group(0)
files = []
for f in sys.argv[3:]:
posLastDot = f.rfind(".")
mhtmlname = f[:posLastDot] + ".mhtml"
cmd = "man2html " + f + "> " + mhtmlname
os.system(cmd)
print ">>>>>>> " + mhtmlname
files.append(mhtmlname)
for filename in files:
dirname, basefilename = os.path.split(filename)
newfilename = basefilename[:-6] + '.html'
print 'Converting', filename, '->', newfilename
shtml = file(filename, 'r')
html = file(newfilename, 'w')
lines = file(sys.argv[1], 'r').read()
lines = lines.replace(".shtml",".html")
html.write(lines)
# html.write(<!--#include virtual="header.txt"-->)
for line in shtml.readlines():
# Remove html header/footer created by man2html
if line == "Content-type: text/html\n":
continue
if line[:6] == "<HTML>":
continue
if line[:7] == "</HEAD>":
continue
if line[:7] == "</HTML>":
continue
if line[:7] == "</BODY>":
continue
line = include_regex.sub(include_virtual, line)
# Special case some html references
line = llnl_references(line)
#insert tags for some options
insert_tag(html, line)
# Make man2html links relative ones
line = relative_reference(line)
line = url_regex.sub(url_rewrite, line)
html.write(line)
lines = file(sys.argv[2], 'r').read()
html.write(lines)
# html.write(<!--#include virtual="footer.txt"-->)
html.close()
shtml.close()
os.remove(filename)
| gpl-2.0 | 4,936,683,348,735,532,000 | 35.231156 | 140 | 0.580166 | false |
sujitmhj/document_similarity_based_on_bloom_filter | main/views.py | 1 | 3367 | from django.shortcuts import render, HttpResponse
from django.views.generic import View
from pybloom.pybloom import BloomFilter
import uuid
from django import forms
import json
from nltk.corpus import stopwords
import nltk
nltk.data.path.append('/home/sujit/nltk_data')
# Create your views here.
### Settings for similarity detection ###
# shinglelength = 4
def get_filter(shingles):
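	# Build a Bloom filter holding every shingle (each shingle is a list of
	# tokens, stored as a single space-joined string).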
f = BloomFilter(capacity=10000, error_rate=0.001)
for sg in shingles:
f.add(" ".join(sg))
return f
def shingle(tokens, shinglelength):
arr = []
if len(tokens)%2 == 1:
max_i = len(tokens) - shinglelength
else:
max_i = len(tokens) - shinglelength + 1
for i in range(max_i):
arr.append(tokens[i:i+shinglelength])
return arr
def get_similarity_value(tokens_A,tokens_B,single_length):
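	# Shingle both token sequences and load them into Bloom filters, then
	# estimate the Jaccard similarity (common shingles / union size) by
	# counting the shingles of B that test positive against A's filter.
	# Bloom-filter false positives may slightly overestimate the overlap.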
shingle_A = shingle(tokens_A, single_length)
shingle_B = shingle(tokens_B, single_length)
# print(shingle_A)
b1 = get_filter(shingle_A)
b2 = get_filter(shingle_B)
common_count = 0
for sg in shingle_B:
if " ".join(sg) in b1:
common_count = common_count + 1
a_union_b = (len(shingle_A) + len(shingle_B) - common_count)
print("single_size:", single_length)
print("union:",a_union_b)
print("common:", common_count)
similarity = (common_count*1.0)/a_union_b
return similarity
def calculate_similarity(doc1, doc2, single_length):
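	# Tokenise both documents on whitespace and compute the shingle-based
	# similarity twice: once on the raw tokens and once with English
	# stopwords (NLTK stopword list) removed.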
tokens_A = doc1.split()
tokens_B = doc2.split()
filtered_words_A = [word for word in tokens_A if word not in stopwords.words('english')]
filtered_words_B = [word for word in tokens_B if word not in stopwords.words('english')]
similarity_with_stop_words = get_similarity_value(tokens_A,tokens_B,single_length)
similarity_without_stop_words = get_similarity_value(filtered_words_A,filtered_words_B,single_length)
# if
# # print(b1.bitarray)
# # print(b2.bitarray)
# union = b1.union(b2)
# intersection = b1.intersection(b2)
# total_one = intersection.bitarray.count()
# # print b1.bitarray
# # print b2.bitarray
# # print(union.bitarray.count())
# total_bits = intersection.num_bits
# percent = total_one*100.00/union.bitarray.count()
return {"s_without":similarity_without_stop_words, "s_with":similarity_with_stop_words}
class FileUploadForm(forms.Form):
file1 = forms.FileField()
file2 = forms.FileField()
def handle_uploaded_file(f):
file_name = uuid_str = "../uploaded_files/"+str(uuid.uuid1()) +".txt"
with open(file_name, 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
class CalculateSimilarity(View):
def get(self, request, *args, **kwargs):
return HttpResponse('Hello, World!')
def post(self, request, *args, **kwargs):
doc1 = request.POST.get("file1")
doc2 = request.POST.get("file2")
single_size = int(request.POST.get("shingle_size",4))
# form = FileUploadForm(request.POST)
# if form.is_valid():
# handle_uploaded_file(request.FILES['file1'])
# handle_uploaded_file(frequest.FILES['file2'])
# # form.file1
if doc1!=None and doc2!=None:
return HttpResponse(json.dumps(calculate_similarity(doc1,doc2, single_size)))
# print "not gone"
| mit | -6,982,455,363,895,425,000 | 24.507576 | 105 | 0.647461 | false |
mrnkv/MCMaker | main.py | 1 | 25645 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import codecs
import sys
import string
import zipfile
import codecs
import shutil
import uuid
from ConfigParser import SafeConfigParser
from datetime import date
from PyQt4.QtGui import QApplication, QDialog, QMainWindow, QFileDialog
from PyQt4.QtGui import QTableWidgetItem, QAbstractItemView, QMessageBox
from PyQt4.QtCore import *
from Forms.ui_mainwindow import Ui_MainWindow
from Forms.ui_groups import Ui_GroupDialog
from Forms.ui_groups_mvc import Ui_GroupDialogMVC
from Forms.ui_komand import Ui_Komand
from Forms.ui_position import Ui_Position
from Forms.ui_dept import Ui_Dept
from Forms.ui_empls import Ui_Employee
from Forms.ui_sltask import Ui_SlTask
from Forms.ui_selectemployee import Ui_SelectEmployee
from entities import *
SLTASK_TEMPLATE = template_dir+template_slrec_file
#'file:///home/mrnkv/MCMaker/Templates/SlTask.odt'
data = {}
class MainWindow(QMainWindow, Ui_MainWindow):
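    # Main application window: wires the business-trip (Komand) and service
    # record (ServRecord) table models to their views and exposes the
    # reference-data dialogs (groups, employees, positions, departments)
    # through the menu actions.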
def __init__(self):
QMainWindow.__init__(self)
self.setupUi(self)
self.model = KomandModel(self.komandTableView)
self.sltaskmodel = SlTaskModel(self.servrecTableView)
self.komandTableView.setModel(self.model)
self.servrecTableView.setModel(self.sltaskmodel)
self.connect(self.groups, SIGNAL("triggered()"), self.groupsDialogShow)
self.connect(self.employees, SIGNAL("triggered()"), self.employeesDialogShow)
self.connect(self.positions, SIGNAL("triggered()"), self.positionsDialogShow)
self.connect(self.depts, SIGNAL("triggered()"), self.deptsDialogShow)
self.komandTableView.setSelectionBehavior(QAbstractItemView.SelectRows)
self.servrecTableView.setSelectionBehavior(QAbstractItemView.SelectRows)
self.komandTableView.setSelectionMode(QAbstractItemView.SingleSelection)
self.servrecTableView.setSelectionMode(QAbstractItemView.SingleSelection)
self.komandTableView.selectRow(0)
def addSlTask(self):
        index = komand_index = self.komandTableView.currentIndex()
print 'INDEX', index.row()
if not index.isValid():
print 'Index is not valid'
QMessageBox.warning(self, u"Добаление записки", u"Сделайте выбор командировки")
return
selEmplDialog = SelectEmployeeDialog()
if not selEmplDialog.exec_():
return
index = selEmplDialog.tableView.currentIndex()
if not index.isValid():
return
row = index.row()
employee = selEmplDialog.model.data[row]
        komand = self.model.data[komand_index.row()]
dialog = SlTaskDialog()
dialog.dateEdit.setDate(QDate.currentDate())
dialog.lineEdit_9.setText(komand.komand_zadan)
dialog.lineEdit_6.setText(komand.komand_addr)
dialog.lineEdit_7.setText(komand.komand_org)
dialog.dateEdit_2.setDate(komand.komand_start)
dialog.dateEdit_3.setDate(komand.komand_end)
dialog.lineEdit_8.setText(komand.komand_cos)
dialog.lineEdit_2.setText(employee.family+' '+employee.name+' '+employee.sname)
dialog.lineEdit_3.setText(str(employee.tab_num))
position = session.query(Position).filter_by(pos_id = employee.position).one()
dialog.lineEdit_5.setText(position.pos_name)
dept = session.query(Dept).filter_by(dept_id = position.dept).one()
group = session.query(Group).filter_by(group_id = position.group).one()
dialog.lineEdit_4.setText(dept.dept_long_name+' '+group.group_long_name)
if not dialog.exec_():
print 'Not Add slTask'
return
servrecord = ServRecord(
komand.komand_id,
int(unicode(dialog.lineEdit.text())), #record_num
date(dialog.dateEdit.date().year(),
dialog.dateEdit.date().month(),
dialog.dateEdit.date().day()), #record_date
unicode(dialog.lineEdit_2.text()), #record_fio
int (dialog.lineEdit_3.text()), #record_tabnum
unicode(dialog.lineEdit_4.text()), #record_str_podr
unicode(dialog.lineEdit_5.text()), #record_dolg
unicode(dialog.lineEdit_6.text()), #record_addr
unicode(dialog.lineEdit_7.text()), #record_ org
date(dialog.dateEdit_2.date().year(),
dialog.dateEdit_2.date().month(),
dialog.dateEdit_2.date().day()), #record_start
date(dialog.dateEdit_3.date().year(),
dialog.dateEdit_3.date().month(),
dialog.dateEdit_3.date().day()), #record_end
unicode(dialog.lineEdit_9.text()), #record_zadan
unicode(dialog.lineEdit_8.text()), #record_osn
unicode(dialog.lineEdit_10.text()),#record_ruk_str_otpr_dolg,
unicode(dialog.lineEdit_11.text()),#record_ruk_str_otpr_fio,
unicode(dialog.lineEdit_12.text()),#record_ruk_str_prin_dolg,
unicode(dialog.lineEdit_13.text()),#record_ruk_str_prin_fio,
unicode(dialog.lineEdit_14.text()),#record_ruk_org_dolg,
unicode(dialog.lineEdit_15.text())#record_ruk_org_fio
)
self.sltaskmodel.emit(SIGNAL("layoutAboutToBeChanged()"))
session.add(servrecord)
session.commit()
self.sltaskmodel.data = session.query(ServRecord).all()
self.sltaskmodel.emit(SIGNAL("layoutChanged()"))
self.servrecTableView.reset()
def delSlTask(self):
index = self.servrecTableView.currentIndex()
if not index.isValid():
return
row = index.row()
self.sltaskmodel.emit(SIGNAL("layoutAboutToBeChanged()"))
session.delete(self.sltaskmodel.data[row])
session.commit()
self.sltaskmodel.data = session.query(ServRecord).all()
self.sltaskmodel.emit(SIGNAL("layoutChanged()"))
def printSlTask(self):
index = self.servrecTableView.currentIndex()
if not index.isValid():
return
row = index.row()
sltask = self.sltaskmodel.data[row]
global data
data['$record_num']= str(sltask.record_num)
data['$record_date']= sltask.record_date.strftime('%d.%m.%Y')+u'г.'
data['$record_fio']= sltask.record_fio
data['$record_tabnum']= str(sltask.record_tabnum)
data['$record_str_podr']= sltask.record_str_podr
data['$record_dolg']= sltask.record_dolg
data['$record_addr']= sltask.record_addr
data['$record_org']= sltask.record_org
data['$record_start']= sltask.record_start.strftime('%d.%m.%Y')+u'г.'
data['$record_end']= sltask.record_end.strftime('%d.%m.%Y')+u'г.'
data['$record_duration'] =str((sltask.record_end - sltask.record_start).days + 1 )
data['$record_zadan']= sltask.record_zadan
data['$record_osn']= sltask.record_osn
data['$record_ruk_str_otpr_dolg']= sltask.record_ruk_str_otpr_dolg
data['$record_ruk_str_otpr_fio']= sltask.record_ruk_str_otpr_fio
data['$record_ruk_str_prin_dolg']= sltask.record_ruk_str_prin_dolg
data['$record_ruk_str_prin_fio']= sltask.record_ruk_str_prin_fio
data['$record_ruk_org_dolg']= sltask.record_ruk_org_dolg
data['$record_ruk_org_fio']= sltask.record_ruk_org_fio
'''
        Here we:
        - open the document template
        - substitute all the variables
        - save the result
'''
uid = uuid.uuid4()
tmpdir=os.path.expanduser('~/.MCMaker/')+str(uid)+'/'
os.makedirs(tmpdir)
tmpfilename = tmpdir+'tmpdoc.zip'
with open(tmpfilename, 'a') as f:
with open(SLTASK_TEMPLATE) as t:
shutil.copyfileobj(t, f)
with zipfile.ZipFile(tmpfilename) as zf:
zf.extractall(tmpdir)
os.remove(tmpfilename)
content = ''
with codecs.open(tmpdir+'content.xml', 'r', 'utf-8') as f:
content = f.read()
for i in data.keys():
content = string.replace(content, i, data[i])
with codecs.open(tmpdir+'content.xml', 'w', 'utf-8') as f:
f.write(content)
os.chdir(tmpdir)
shutil.make_archive(docs_dir+'SlTask', 'zip')
shutil.move(docs_dir+'SlTask.zip', docs_dir+data['$record_num']+'_'+data['$record_fio']+'.odt')
shutil.rmtree(tmpdir)
def addAccount(self):
pass
def delAccount(self):
pass
def printAccount(self):
pass
def addKommand(self):
dialog = KomandDialog()
if dialog.exec_():
komand_zadan = unicode(dialog.lineEdit.text())
komand_addr = unicode(dialog.lineEdit_3.text())
komand_org = unicode(dialog.lineEdit_2.text())
komand_start = date(dialog.startDateEdit.date().year(), dialog.startDateEdit.date().month(), dialog.startDateEdit.date().day())
komand_end = date(dialog.endDateEdit.date().year(), dialog.endDateEdit.date().month(), dialog.endDateEdit.date().day())
komand_cos = unicode(dialog.lineEdit_4.text())
komand = Komand(komand_zadan, komand_addr, komand_org, komand_start,
komand_end, komand_cos)
session.add(komand)
session.commit()
self.model.emit(SIGNAL("layoutAboutToBeChanged()"))
self.model.data = session.query(Komand).all()
self.model.emit(SIGNAL("layoutChanged()"))
            self.komandTableView.reset()
self.komandTableView.selectRow(0)
def delKomand(self):
print 'Delete komand...'
        index = self.komandTableView.currentIndex()
if not index.isValid():
return
row = index.row()
self.model.emit(SIGNAL("layoutAboutToBeChanged()"))
session.delete(self.model.data[row])
session.commit()
self.model.data = session.query(Komand).all()
self.model.emit(SIGNAL("layoutChanged()"))
def groupsDialogShow(self):
dialog = GroupDialogMVC()
dialog.exec_()
def employeesDialogShow(self):
dialog = EmployeeDialog()
dialog.exec_()
def positionsDialogShow(self):
dialog = PositionDialog()
dialog.exec_()
def deptsDialogShow(self):
dialog = DeptDialog()
dialog.exec_()
class EmployeePositionsModel(QAbstractTableModel):
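    # Table model listing the positions that are not yet filled
    # (Position.employee == 0): department, group and position name.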
def __init__(self, parent):
QAbstractTableModel.__init__(self)
self.data = list(session.query(Position).filter(Position.employee == 0))
for i in self.data:
pass
def Data(self, position, index):
if index == 0:
dept = session.query(Dept).filter_by(dept_id = position.dept).one()
return dept.dept_short_name
elif index == 1:
group = session.query(Group).filter_by(group_id = position.group).one()
return group.group_short_name
elif index == 2:
return position.pos_name
def rowCount(self, parent):
return len(self.data)
def columnCount(self, parent):
return 3
def data(self, index, role):
if not index.isValid():
return None
if role != Qt.DisplayRole:
return None
return QVariant(self.Data(self.data[index.row()], index.column()))
class EmployeeModel(QAbstractTableModel):
def __init__(self, parent):
QAbstractTableModel.__init__(self)
self.data = session.query(Employee).all()
def EmployeeData(self, employee, index):
if index == 0:
return employee.family
if index == 1:
return employee.name
if index == 2:
return employee.sname
def rowCount(self, parent):
return len(self.data)
def columnCount(self, parent):
return 3
def data(self, index, role):
if not index.isValid():
return None
if role != Qt.DisplayRole:
return None
return QVariant(self.EmployeeData(self.data[index.row()], index.column()))
class SlTaskModel(QAbstractTableModel):
def __init__(self, parent):
QAbstractTableModel.__init__(self)
self.data = session.query(ServRecord).all()
def SlTaskData(self, sltask, index):
if index == 0:
return sltask.record_num
if index == 1:
return sltask.record_date.isoformat()
if index == 2:
return sltask.record_fio
def rowCount(self, parent):
return len(self.data)
def columnCount(self, parent):
return 3
def data(self,index, role):
if not index.isValid():
return None
if role != Qt.DisplayRole:
return None
return QVariant(self.SlTaskData(self.data[index.row()], index.column()))
class SelectEmployeeDialog(QDialog, Ui_SelectEmployee):
def __init__(self):
QDialog.__init__(self)
self.setupUi(self)
self.model = EmployeeModel(self.tableView)
self.tableView.setModel(self.model)
self.tableView.setSelectionBehavior(QAbstractItemView.SelectRows)
self.tableView.setSelectionMode(QAbstractItemView.SingleSelection)
class EmployeeDialog(QDialog, Ui_Employee):
def __init__(self):
QDialog.__init__(self)
self.setupUi(self)
self.emplsPosModel = EmployeePositionsModel(self.positionsTableView)
self.positionsTableView.setModel(self.emplsPosModel)
self.emplsModel = EmployeeModel(self.emplsTableView)
self.emplsTableView.setModel(self.emplsModel)
self.emplsTableView.setSelectionBehavior(QAbstractItemView.SelectRows)
self.positionsTableView.setSelectionBehavior(QAbstractItemView.SelectRows)
self.emplsTableView.setSelectionMode(QAbstractItemView.SingleSelection)
self.positionsTableView.setSelectionMode(QAbstractItemView.SingleSelection)
def addEmployee(self):
name = unicode(self.emplName.text())
f_name = unicode(self.emplFirstName.text())
s_name = unicode(self.emplSerName.text())
tab_num = int(unicode(self.emplTabNum.text()))
index = self.positionsTableView.currentIndex()
if not index.isValid():
return
row = index.row()
self.emplsPosModel.emit(SIGNAL("layoutAboutToBeChanged()"))
self.emplsModel.emit(SIGNAL("layoutAboutToBeChanged()"))
position = self.emplsPosModel.data[row]
position_id = position.pos_id
employee = Employee(name, f_name, s_name, position_id, tab_num)
session.add(employee)
session.commit()
position.employee = int(employee.empl_id)
session.commit()
self.emplsPosModel.data = list(session.query(Position).filter(Position.employee == 0))
self.emplsModel.data = session.query(Employee).all()
self.emplsModel.emit(SIGNAL("layoutChanged()"))
self.emplsPosModel.emit(SIGNAL("layoutChanged()"))
self.emplsTableView.reset()
self.positionsTableView.reset()
self.emplFirstName.setText('')
self.emplSerName.setText('')
self.emplTabNum.setText('')
def delEmployee(self):
index = self.emplsTableView.currentIndex()
if not index.isValid():
return
row = index.row()
self.emplsPosModel.emit(SIGNAL("layoutAboutToBeChanged()"))
self.emplsModel.emit(SIGNAL("layoutAboutToBeChanged()"))
self.emplsTableView.reset()
empl = self.emplsModel.data[row]
pos =session.query(Position).filter_by(employee = empl.position).first()
pos.employee = 0
session.delete(self.emplsModel.data[row])
session.commit()
self.emplsModel.data = session.query(Employee).all()
self.emplsPosModel.data = list(session.query(Position).filter(Position.employee == 0))
self.emplsModel.emit(SIGNAL("layoutChanged()"))
self.emplsPosModel.emit(SIGNAL("layoutChanged()"))
self.positionsTableView.reset()
pass
class DeptModel(QAbstractTableModel):
def __init__(self, parent):
QAbstractTableModel.__init__(self)
self.data = session.query(Dept).all()
def deptData(self, dept, index):
if index == 0:
return dept.dept_id
elif index == 1:
return dept.dept_long_name
elif index == 2:
return dept.dept_short_name
def rowCount(self, parent):
return len(self.data)
def columnCount(self, parent):
return 3
def data(self, index, role):
if not index.isValid():
return None
if role != Qt.DisplayRole:
return None
return QVariant(self.deptData(self.data[index.row()], index.column()))
class DeptDialog(QDialog, Ui_Dept):
def __init__(self):
QDialog.__init__(self)
self.setupUi(self)
self.model = DeptModel(self.deptsTableView)
self.deptsTableView.setModel(self.model)
def addDept(self):
l_name = unicode(self.deptLongName.text())
s_name = unicode(self.deptShortName.text())
print l_name, s_name
if len(l_name) > 0 and len(s_name) > 0:
dept = Dept(s_name, l_name)
session.add(dept)
session.commit()
self.model.emit(SIGNAL("layoutAboutToBeChanged()"))
self.model.data = session.query(Dept).all()
self.model.emit(SIGNAL("layoutChanged()"))
self.deptsTableView.reset()
self.deptLongName.setText('')
self.deptShortName.setText('')
else:
print u"Задайте краткое и полное наименование службы"
pass
def delDept(self):
print 'Deleting department ...'
index = self.deptsTableView.currentIndex()
if not index.isValid():
return
row = index.row()
self.model.emit(SIGNAL("layoutAboutToBeChanged()"))
session.delete(self.model.data[row])
self.model.data = session.query(Dept).all()
self.model.emit(SIGNAL("layoutChanged()"))
def cancelAddDept(self):
self.deptLongName.setText('')
self.deptShortName.setText('')
class PositionModel(QAbstractTableModel):
def __init__(self, parent):
QAbstractTableModel.__init__(self)
self.data = session.query(Position).all()
print type(len(self.data)), len(self.data)
def positionData(self, position, index):
if index == 0:
dept = session.query(Dept).filter_by(dept_id = position.dept).one()
return dept.dept_short_name
elif index == 1:
group = session.query(Group).filter_by(group_id = position.group).one()
return group.group_short_name
elif index == 2:
return position.pos_name
elif index == 3:
return position.pos_short_name
def rowCount(self, parent):
return len(self.data)
def columnCount(self, parent):
return 4 #
def data(self, index, role):
if not index.isValid():
return None
if role != Qt.DisplayRole:
return None
return QVariant(self.positionData(self.data[index.row()], index.column()))
class PositionDialog(QDialog, Ui_Position):
def __init__(self):
QDialog.__init__(self)
self.setupUi(self)
self.positionModel = PositionModel(self.positionsTableView)
self.positionsTableView.setModel(self.positionModel)
self.deptModel= DeptModel(self.deptsComboBox)
self.deptsComboBox.setModel(self.deptModel)
self.deptsComboBox.setModelColumn(1)
self.groupModel = GroupModel(self.groupsComboBox)
self.groupsComboBox.setModel(self.groupModel)
self.groupsComboBox.setModelColumn(1)
def addPosition(self):
print 'Adding position ...'
group_pos = self.groupModel.data[self.groupsComboBox.currentIndex()]
dept_pos = self.deptModel.data[self.deptsComboBox.currentIndex()]
l_name = unicode(self.posLongName.text())
s_name = unicode(self.posShortName.text())
if len(l_name) > 0 and len(s_name) > 0:
position = Position(l_name, s_name, group_pos, dept_pos)
session.add(position)
session.commit()
self.positionModel.emit(SIGNAL("layoutAboutToBeChanged()"))
self.positionModel.data = session.query(Position).all()
self.positionModel.emit(SIGNAL("layoutChanged()"))
self.positionsTableView.reset()
else:
print u"Задайте краткое и полное наименование должности"
def delPosition(self):
print 'Deleting position ...'
index = self.positionsTableView.currentIndex()
if not index.isValid():
return
row = index.row()
self.positionModel.emit(SIGNAL("layoutAboutToBeChanged()"))
session.delete(self.positionModel.data[row])
self.positionModel.data = session.query(Position).all()
self.positionModel.emit(SIGNAL("layoutChanged()"))
def cancelAddPosition(self):
self.posLongName.setText('')
self.posShortName.setText('')
class KomandModel(QAbstractTableModel):
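    # Read-only table model over the Komand records in the database; columns
    # map to id, start/end dates, organisation, address, task and reason.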
def __init__(self, parent):
QAbstractTableModel.__init__(self)
self.data = session.query(Komand).all()
def komandData(self, komand, index):
if index == 0:
return komand.komand_id
elif index == 1:
return komand.komand_start.isoformat()
elif index == 2:
return komand.komand_end.isoformat()
elif index == 3:
return komand.komand_org
elif index == 4:
return komand.komand_addr
elif index == 5:
return komand.komand_zadan
elif index == 6:
return komand.komand_cos
def rowCount(self, parent):
return len(self.data)
def columnCount(self, parent):
return 7
def data(self, index, role):
if not index.isValid():
return None
if role != Qt.DisplayRole:
return None
return QVariant(self.komandData(self.data[index.row()], index.column()))
class KomandDialog(QDialog, Ui_Komand):
def __init__(self):
QDialog.__init__(self)
self.setupUi(self)
self.startDateEdit.setDate(QDate.currentDate())
self.endDateEdit.setDate(QDate.currentDate())
class SlTaskDialog(QDialog, Ui_SlTask):
def __init__(self):
QDialog.__init__(self)
self.setupUi(self)
self.lineEdit_10.setText(u'Начальник лаборатории связи')
self.lineEdit_12.setText(u'Начальник лаборатории связи')
self.lineEdit_14.setText(u'Начльник Елецкого ЛПУМГ')
self.lineEdit_11.setText(u'В.В.Меренков')
self.lineEdit_13.setText(u'В.В.Меренков')
self.lineEdit_15.setText(u'В.Н.Сидорцов')
class GroupDialogMVC(QDialog, Ui_GroupDialogMVC):
def __init__(self):
QDialog.__init__(self)
self.setupUi(self)
self.model = GroupModel(self.groupsTableView)
self.groupsTableView.setModel(self.model)
def delGroup(self):
print 'Deleting group...'
index = self.groupsTableView.currentIndex()
if not index.isValid():
return
row = index.row()
self.model.emit(SIGNAL("layoutAboutToBeChanged()"))
session.delete(self.model.data[row])
self.model.data = session.query(Group).all()
self.model.emit(SIGNAL("layoutChanged()"))
def addGroup(self):
print 'Adding group ...'
l_name = unicode(self.groupLongName.text())
s_name = unicode(self.groupShortName.text())
print l_name, s_name
if len(l_name) > 0 and len(s_name) > 0:
group = Group(group_long_name = l_name, group_short_name = s_name)
session.add(group)
session.commit()
self.model.emit(SIGNAL("layoutAboutToBeChanged()"))
self.model.data = session.query(Group).all()
self.model.emit(SIGNAL("layoutChanged()"))
self.groupsTableView.reset()
self.groupLongName.setText('')
self.groupShortName.setText('')
else:
print u"Задайте краткое и полное наименоание группы"
def addGroupCancel(self):
self.groupLongName.setText('')
self.groupShortName.setText('')
class GroupModel(QAbstractTableModel):
def __init__(self, parent):
QAbstractTableModel.__init__(self)
self.data = session.query(Group).all()
print type(len(self.data)), len(self.data)
def groupData(self, group, index):
if index == 0:
return group.group_id
elif index == 1:
return group.group_short_name
elif index == 2:
return group.group_long_name
def rowCount(self, parent):
return len(self.data)
def columnCount(self, parent):
return 3 #id, long_name, short_name
def data(self, index, role):
if not index.isValid():
return None
if role != Qt.DisplayRole:
return None
return QVariant(self.groupData(self.data[index.row()], index.column()))
app = QApplication(sys.argv)
session = createSession('')
window = MainWindow()
window.show()
sys.exit(app.exec_())
| gpl-2.0 | -6,862,576,893,923,554,000 | 38.309006 | 139 | 0.620976 | false |
nwjs/chromium.src | third_party/sqlite/scripts/extract_sqlite_api_unittest.py | 1 | 10641 | #!/usr/bin/env python3
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for extract_sqlite_api.py.
These tests should be getting picked up by the PRESUBMIT.py in this directory.
"""
from importlib.machinery import SourceFileLoader
import os
import shutil
import sys
import tempfile
import unittest
class ExtractSqliteApiUnittest(unittest.TestCase):
def setUp(self):
self.test_root = tempfile.mkdtemp()
source_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'extract_sqlite_api.py')
self.extractor = SourceFileLoader('extract_api', source_path).load_module()
def tearDown(self):
if self.test_root:
shutil.rmtree(self.test_root)
def testExtractLineTuples(self):
golden = [(1, 'Line1'), (2, ''), (3, 'Line 2'), (4, 'Line3'), (5, '')]
text_with_newline = "Line1\n\nLine 2 \nLine3\n"
self.assertEqual(self.extractor.ExtractLineTuples(text_with_newline),
golden)
golden = [(1, 'Line1'), (2, ''), (3, 'Line 2'), (4, 'Line3')]
text_without_newline = "Line1\n\nLine 2 \nLine3"
self.assertEqual(self.extractor.ExtractLineTuples(text_without_newline),
golden)
def testExtractPreprocessorDirectives(self):
lines = [
(1, '// Header comment'),
(2, '#define DIRECTIVE 1'),
(3, 'int main() { // \\'),
(4, '}'),
(5, ''),
(6, '#define MULTILINE \\'),
(7, 'MORE_MULTILINE_DIRECTIVE\\'),
(8, 'END_MULTILINE_DIRECTIVE'),
(9, 'void code() { }'),
]
directives, code_lines = self.extractor.ExtractPreprocessorDirectives(lines)
self.assertEqual(directives, [
'#define DIRECTIVE 1',
'#define MULTILINE \nMORE_MULTILINE_DIRECTIVE\nEND_MULTILINE_DIRECTIVE',
])
self.assertEqual(code_lines, [
(1, '// Header comment'),
(3, 'int main() { // \\'),
(4, '}'),
(5, ''),
(9, 'void code() { }'),
])
def testExtractDefineMacroName(self):
self.assertEqual(
'SQLITE_API', self.extractor.ExtractDefineMacroName(
'#define SQLITE_API 1'))
self.assertEqual(
'SQLITE_API', self.extractor.ExtractDefineMacroName(
'#define SQLITE_API'))
self.assertEqual(
'SQLITE_API', self.extractor.ExtractDefineMacroName(
'#define SQLITE_API\n1'))
self.assertEqual(
'SQLITE_API', self.extractor.ExtractDefineMacroName(
'# define SQLITE_API 1'))
self.assertEqual(
'SQLITE_API', self.extractor.ExtractDefineMacroName(
'#\tdefine\tSQLITE_API\t1'))
self.assertEqual(
None, self.extractor.ExtractDefineMacroName(
' #define SQLITE_API 1'))
self.assertEqual(
None, self.extractor.ExtractDefineMacroName(
' #define SQLITE_API() 1'))
self.assertEqual(None, self.extractor.ExtractDefineMacroName(''))
def testRemoveLineComments(self):
self.assertEqual(
'word;', self.extractor.RemoveLineComments('word;'))
self.assertEqual(
'', self.extractor.RemoveLineComments(''))
self.assertEqual(
'', self.extractor.RemoveLineComments('// comment'))
self.assertEqual(
'', self.extractor.RemoveLineComments('/* comment */'))
self.assertEqual(
'word;', self.extractor.RemoveLineComments('wo/*comment*/rd;'))
self.assertEqual(
'word;*/', self.extractor.RemoveLineComments('wo/*comment*/rd;*/'))
self.assertEqual(
'word;*/', self.extractor.RemoveLineComments('wo/*/*comment*/rd;*/'))
self.assertEqual(
'word;', self.extractor.RemoveLineComments('wo/*comm//ent*/rd;'))
def testRemoveComments(self):
lines = [
(1, 'code();'),
(2, 'more_code(); /* with comment */ more_code();'),
(3, '/**'),
(4, 'Spec text'),
(5, '**/ spec_code();'),
(6, 'late_code(); /* with comment */ more_late_code(); /* late comment'),
(7, 'ends here // C++ trap */ code(); // /* C trap'),
(8, 'last_code();'),
]
self.assertEqual(self.extractor.RemoveComments(lines), [
(1, 'code();'),
(2, 'more_code(); more_code();'),
(3, ''),
(5, ' spec_code();'),
(6, 'late_code(); more_late_code(); '),
(7, ' code(); '),
(8, 'last_code();'),
])
def testToStatementTuples(self):
lines = [
(1, 'void function();'),
(2, 'int main('),
(3, ' int argc, char* argv) {'),
(4, ' statement1; statement2;'),
(5, '}'),
(6, 'stat'),
(7, 'ement4; statement5; sta'),
(8, 'tem'),
(9, 'ent6; statement7;')
]
self.assertEqual(self.extractor.ToStatementTuples(lines), [
(1, 1, 'void function()'),
(2, 3, 'int main(\n int argc, char* argv)'),
(4, 4, 'statement1'),
(4, 4, 'statement2'),
(5, 5, ''),
(6, 7, 'stat\nement4'),
(7, 7, 'statement5'),
(7, 9, 'sta\ntem\nent6'),
(9, 9, 'statement7'),
])
def testExtractApiExport(self):
self.assertEqual(
'sqlite3_init',
self.extractor.ExtractApiExport(
set(), 'SQLITE_API', 'SQLITE_API void sqlite3_init()'))
self.assertEqual(
'sqlite3_sleep',
self.extractor.ExtractApiExport(
set(), 'SQLITE_API', 'SQLITE_API int sqlite3_sleep(int ms)'))
self.assertEqual(
'sqlite3_sleep',
self.extractor.ExtractApiExport(
set(), 'SQLITE_API',
'SQLITE_API long long sqlite3_sleep(int ms)'))
self.assertEqual(
'sqlite3rbu_temp_size',
self.extractor.ExtractApiExport(
set(), 'SQLITE_API',
'SQLITE_API sqlite3_int64 sqlite3rbu_temp_size(sqlite3rbu *pRbu)'))
self.assertEqual(
'sqlite3_expired',
self.extractor.ExtractApiExport(
set(['SQLITE_DEPRECATED']), 'SQLITE_API',
'SQLITE_API SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*)'))
# SQLite's header actually #defines double (in some cases).
self.assertEqual(
'sqlite3_column_double',
self.extractor.ExtractApiExport(
set(['double']), 'SQLITE_API',
'SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol)'))
self.assertEqual(
'sqlite3_temp_directory',
self.extractor.ExtractApiExport(
set(['SQLITE_EXTERN']), 'SQLITE_API',
'SQLITE_API SQLITE_EXTERN char *sqlite3_temp_directory'))
self.assertEqual(
'sqlite3_version',
self.extractor.ExtractApiExport(
set(['SQLITE_EXTERN']), 'SQLITE_API',
'SQLITE_API SQLITE_EXTERN const char sqlite3_version[]'))
self.assertEqual(
None,
self.extractor.ExtractApiExport(
set(['SQLITE_DEPRECATED']), 'SQLITE_API',
'NOT_SQLITE_API struct sqlite_type sqlite3_sleep(int ms)'))
with self.assertRaisesRegex(self.extractor.ExtractError,
'Mixed simple .* and composite'):
self.extractor.ExtractApiExport(
set(), 'SQLITE_API', 'SQLITE_API void int sqlite3_sleep(int ms)')
with self.assertRaisesRegex(self.extractor.ExtractError,
'Unsupported keyword struct'):
self.extractor.ExtractApiExport(
set(), 'SQLITE_API',
'SQLITE_API struct sqlite_type sqlite3_sleep(int ms)')
with self.assertRaisesRegex(self.extractor.ExtractError,
'int\+\+ parsed as type name'):
self.extractor.ExtractApiExport(
set(), 'SQLITE_API', 'SQLITE_API int++ sqlite3_sleep(int ms)')
with self.assertRaisesRegex(self.extractor.ExtractError,
'sqlite3\+sleep parsed as symbol'):
self.extractor.ExtractApiExport(
set(), 'SQLITE_API', 'SQLITE_API int sqlite3+sleep(int ms)')
def testExportedSymbolLine(self):
self.assertEqual(
'#define sqlite3_sleep chrome_sqlite3_sleep // Line 42',
self.extractor.ExportedSymbolLine(
'chrome_', 'sqlite3_sleep',
(42, 42, 'SQLITE_API int chrome_sqlite3_sleep(int ms)')))
self.assertEqual(
'#define sqlite3_sleep chrome_sqlite3_sleep // Lines 42-44',
self.extractor.ExportedSymbolLine(
'chrome_', 'sqlite3_sleep',
(42, 44, 'SQLITE_API int chrome_sqlite3_sleep(int ms)')))
def testExportedExceptionLine(self):
self.assertEqual(
'// TODO: Lines 42-44 -- Something went wrong',
self.extractor.ExportedExceptionLine(
self.extractor.ExtractError('Something went wrong'),
(42, 44, 'SQLITE_API int chrome_sqlite3_sleep(int ms)')))
def testProcessSource(self):
file_content = '\n'.join([
'/*',
'struct sqlite_type sqlite3_sleep; // Remove comments',
'*/',
'#define SQLITE_DEPRECATED',
'SQLITE_API int sqlite3_sleep(int ms);',
'SQLITE_API struct sqlite_type sqlite3_sleep(int ms);',
'SQLITE_API SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*);',
])
golden_output = [
'// Header',
'#define sqlite3_expired chrome_sqlite3_expired // Line 7',
'#define sqlite3_sleep chrome_sqlite3_sleep // Line 5',
'// TODO: Lines 6-6 -- Unsupported keyword struct',
'// Footer',
]
self.assertEqual(
golden_output,
self.extractor.ProcessSource('SQLITE_API', 'chrome_', '// Header',
'// Footer', file_content))
def testProcessSourceFile(self):
file_content = '\n'.join([
'/*',
'struct sqlite_type sqlite3_sleep; // Remove comments',
'*/',
'#define SQLITE_DEPRECATED',
'SQLITE_API int sqlite3_sleep(int ms);',
'SQLITE_API struct sqlite_type sqlite3_sleep(int ms);',
'SQLITE_API SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*);',
])
golden_output = '\n'.join([
'// Header',
'#define sqlite3_expired chrome_sqlite3_expired // Line 7',
'#define sqlite3_sleep chrome_sqlite3_sleep // Line 5',
'// TODO: Lines 6-6 -- Unsupported keyword struct',
'// Footer',
'',
])
input_file = os.path.join(self.test_root, 'input.h')
output_file = os.path.join(self.test_root, 'macros.h')
with open(input_file, 'w') as f:
f.write(file_content)
self.extractor.ProcessSourceFile(
'SQLITE_API', 'chrome_', '// Header', '// Footer', input_file,
output_file)
with open(output_file, 'r') as f:
self.assertEqual(f.read(), golden_output)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -9,216,416,084,332,095,000 | 35.693103 | 80 | 0.589794 | false |
RuthAngus/K2rotation | tests/fit_all_the_light_curves.py | 1 | 3471 | import numpy as np
import matplotlib.pyplot as plt
import h5py
import glob
import fitsio
from gatspy.periodic import LombScargle
from scipy.signal import lombscargle
from SIP import SIP
"""
Fit all the light curves with the basis from c1 and then sample from those
weights
1. assemble_fnames - make a list of all K2 light curve names
2. load_lc - load x and y values of K2 light curve.
3. fit_lc - fit the basis to one light curve and return the weights.
4. fit_all_lcs - load file names, load data and fit all the light curves.
Return a 2d array of weights.
5. reconstruct_fake_lc - randomly sample nb weight values from the 2d array
and make a new fake light curve.
"""
def assemble_fnames():
fnames = []
path = "/export/bbq2/dfm/k2/web/lightcurves/c1"
i1s = glob.glob("%s/*" % path)
for i1 in i1s:
i2s = glob.glob("%s/*" % i1)
for i2 in i2s:
i3s = glob.glob("%s/*" % i2)
fnames.append(i3s)
fnames = [j for i in fnames for j in i]
return fnames
def load_lc(fname):
data = fitsio.read(fname)
aps = fitsio.read(fname, 2)
y = data["flux"][:, np.argmin(aps["cdpp6"])]
x = data["time"]
q = data["quality"]
l = np.isfinite(y) * np.isfinite(x) * (q==0)
y, x = y[l], x[l]
y /= np.median(y)
y -= 1
return x, y, l
def fit_lc(x, y, basis, nb):
# construct arrays
AT = np.ones((nb+1, len(y)))
AT[:-1, :] = basis
ATA = np.dot(AT, AT.T)
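    # solve the normal equations (AT AT^T) w = AT y, i.e. the least-squares fit of
    # the basis vectors (plus a constant-offset row) to the light curve y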
return np.linalg.solve(ATA, np.dot(AT, y))
def fit_all_lcs(nb):
# load basis
with h5py.File("../data/c1.h5", "r") as f:
basis = f["basis"][:nb]
# generate list of all k2 lc filenames
fnames = assemble_fnames()
# find the weight vectors for all the k2 light curves
weights = np.zeros((151, len(fnames)))
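    # 151 rows = nb + 1 weights per light curve (nb basis coefficients plus the
    # constant offset); note this hard-codes the default nb = 150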
for i, fname in enumerate(fnames):
x, y, l = load_lc(fname)
weights[:, i] = fit_lc(x, y, basis.T[l].T, nb)
# save the weights
f = h5py.File("all_weights.h5", "w")
data = f.create_dataset("weights", np.shape(weights))
data[:, :] = weights[:, :]
return weights
def reconstruct_fake_lc(n=21646, nb=150):
# load the weights
with h5py.File("all_weights.h5", "r") as f:
weights = f["weights"][...]
# load basis
with h5py.File("../data/c1.h5", "r") as f:
basis = f["basis"][:nb]
# generate nb random numbers between 1 and the number of lcs
# to select the weights.
chosen_w = np.array([np.random.choice(weights[i]) for i in range(nb)])
fake_lc = np.sum(basis.T * chosen_w, axis=1)
return fake_lc
if __name__ == "__main__":
"""
Test by generating a fake light curve, injecting a sinusoid and
producing an SIP.
"""
fake_lc = reconstruct_fake_lc()
# load example star to get time array
path = "/export/bbq2/dfm/k2/web/lightcurves/c1/201100000/21000"
fname = "ktwo201121245-c01_lpd-lc.fits"
x, y, l = load_lc("%s/%s" % (path, fname))
nb = 150
# load basis
with h5py.File("../data/c1.h5", "r") as f:
basis = f["basis"][:nb, l]
# compute SIP
fs = np.arange(.01, 10., .01)
amp2s, s2n, w = SIP(x, fake_lc[l], basis, fs)
plt.clf()
plt.subplot(3, 1, 1)
plt.plot(fake_lc)
plt.subplot(3, 1, 2)
plt.plot(fs, s2n)
plt.subplot(3, 1, 3)
# inject sinusoid
fake_lc[l] += np.sin(5*np.pi*2*x)
amp2s, s2n, w = SIP(x, fake_lc[l], basis, fs)
plt.plot(fs, s2n)
plt.savefig("fake_lc")
| mit | -3,739,128,414,029,228,500 | 26.768 | 75 | 0.594641 | false |
ua-snap/downscale | snap_scripts/old_scripts/tem_iem_older_scripts_april2018/tem_inputs_iem/downscale_cru_tem_iem.py | 1 | 5457 | # downscale cru data in a CLI way
if __name__ == '__main__':
import glob, os, itertools, rasterio
from downscale import DeltaDownscale, Baseline, Dataset, utils, Mask
from functools import partial
import numpy as np
import argparse
# parse the commandline arguments
parser = argparse.ArgumentParser( description='downscale the AR5-CMIP5 data to the AKCAN extent required by SNAP' )
parser.add_argument( "-ts", "--ts", action='store', dest='cru_ts', type=str, help="path to the cru file to use in downscaling (.nc)" )
parser.add_argument( "-cl", "--clim_path", action='store', dest='clim_path', type=str, help="path to the directory where the 12 geotiff climatology files are stored" )
parser.add_argument( "-o", "--output_path", action='store', dest='output_path', type=str, help="path to the output directory" )
parser.add_argument( "-m", "--model", action='store', dest='model', type=str, help="cmip5 model name (exact)" )
parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="cmip5 variable name (exact)" )
parser.add_argument( "-u", "--units", action='store', dest='units', type=str, help="string name of the units data are in" )
parser.add_argument( "-met", "--metric", action='store', dest='metric', type=str, help="string name of the metric data are in" )
parser.add_argument( "-nc", "--ncpus", action='store', dest='ncpus', type=int, help="number of cpus to use in multiprocessing" )
parser.add_argument( "-ov", "--out_varname", action='store', dest='out_varname', type=str, help="string name of output name to use instead of variable in file" )
args = parser.parse_args()
# unpack args
cru_ts = args.cru_ts
clim_path = args.clim_path
output_path = args.output_path
model = args.model
variable = args.variable
units = args.units
metric = args.metric
ncpus = args.ncpus
out_varname = args.out_varname
# # # # # TESTING
# cru_ts = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS323/cru_ts3.23.1901.2014.hur.dat_snap_conversion.nc'
# clim_path = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/tem_data_sep2016/cru/cru_cl20/hur'
# output_path = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/tem_data_sep2016/downscale'
# model = 'ts323'
# scenario = 'historical'
# variable = 'hur'
# units = 'pct'
# metric = 'mean'
# ncpus = 32
# out_varname = 'hur'
# # # # # # # # #
# standard args
clim_begin = '01-1961'
clim_end = '12-1990'
scenario = 'historical'
project = 'cru'
anom = False # write out anoms (True) or not (False)
	interp = True
	aoi_mask_fn = None # not exposed as a CLI argument here; defined so the 'pr'/'pre' branch below cannot raise a NameError
# RUN
filelist = glob.glob( os.path.join( clim_path, '*.tif' ) )
filelist = [ i for i in filelist if '_14_' not in i ] # remove the GD ANNUAL _14_ file.
baseline = Baseline( filelist )
# DOWNSCALE
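	# read the mask band of the first climatology raster; cells with mask value 0
	# are treated as nodata and excluded from the downscaled output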
mask = rasterio.open( baseline.filelist[0] ).read_masks( 1 )
# make round/trunc function for post_downscale_function
if variable in [ 'pr','pre' ]:
rounder = np.rint
downscaling_operation = 'mult'
find_bounds = True
fix_clim = True
# make AOI_Mask at input resolution for computing 95th percentiles...
if aoi_mask_fn is not None:
aoi_mask = Mask( aoi_mask_fn, historical, 1, 0 )
else:
aoi_mask = None
elif variable in ['hur','reh','cld','clt']:
rounder = partial( np.around, decimals=1 )
downscaling_operation = 'mult'
find_bounds = False
fix_clim = False
aoi_mask = None
elif variable in ['tas', 'tasmin', 'tasmax']:
rounder = partial( np.around, decimals=1 )
downscaling_operation = 'add'
find_bounds = False
fix_clim = False
aoi_mask = None
else:
		raise AttributeError( '{} not found in conditions'.format( variable ) )
def round_it( arr ):
return rounder( arr )
def round_it( x, mask ):
arr = np.ma.masked_array( data=x, mask=mask )
return rounder( arr )
round_data = partial( round_it, mask=( mask==0 ) )
def round_data_clamp_hur( x ):
x[ x < 0.0 ] = 0.0
x[ x > 100.0 ] = 95.0 # per Stephanie McAfee
return round_data( x )
def round_data_clamp_clt( x ):
x[ x < 0.0 ] = 0.0
x[ x > 100.0 ] = 100.0 # per Stephanie McAfee
return round_data( x )
if variable == 'hur':
post_downscale_function = round_data_clamp_hur
	elif variable == 'clt':
post_downscale_function = round_data_clamp_clt
else:
post_downscale_function = round_data
# FOR CRU WE PASS THE interp=True so we interpolate across space first when creating the Dataset()
historical = Dataset( cru_ts, variable, model, scenario, project, units, metric,
method='linear', ncpus=32 )
# ar5 = DeltaDownscale( baseline, clim_begin, clim_end, historical, future=None,
# downscaling_operation=downscaling_operation, mask=mask, mask_value=0, ncpus=32,
# src_crs={'init':'epsg:4326'}, src_nodata=None, dst_nodata=None,
# post_downscale_function=round_it, varname=out_varname, modelname=None, anom=True )
# FOR CRU WE PASS THE interp=True so we interpolate across space first when creating the Dataset()
ar5 = DeltaDownscale( baseline, clim_begin, clim_end, historical, future=None,
downscaling_operation=downscaling_operation, mask=mask, mask_value=0, ncpus=32,
src_crs={'init':'epsg:4326'}, src_nodata=None, dst_nodata=None,
post_downscale_function=post_downscale_function, varname=out_varname, modelname=None,
anom=anom, interp=interp, find_bounds=find_bounds, fix_clim=fix_clim, aoi_mask=aoi_mask )
if not os.path.exists( output_path ):
os.makedirs( output_path )
ar5.downscale( output_dir=output_path )
| mit | -4,452,507,993,191,676,400 | 38.543478 | 168 | 0.684809 | false |
Letractively/spiff | src/FooLib/Interact.py | 1 | 1430 | # Copyright (C) 2007 Samuel Abels, http://debain.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import getpass
def get_user():
"""
    Returns the username.
May throw an exception if EOF is given by the user.
"""
# Read username and password.
try:
env_user = getpass.getuser()
user = raw_input('Please enter your user name [%s]: ' % env_user)
if user == '':
user = env_user
except:
        user = raw_input('Please enter your user name: ')
return user
def get_login():
"""
Returns a tuple containing the username and the password.
May throw an exception if EOF is given by the user.
"""
user = get_user()
password = getpass.getpass('Please enter your password: ')
return (user, password)
| gpl-2.0 | 3,094,274,287,615,304,000 | 34.75 | 77 | 0.681818 | false |
tulsluper/sanscript | apps/bc/scripts/x.py | 1 | 1064 | #!/usr/bin/env python3
import os
import importlib
import logging
from defs import run_with_locker
basepath = os.path.realpath(__file__)[:-3]
lockfile = basepath + '.lock'
logfile = basepath + '.log'
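# truncate any log left over from a previous run so each run starts with a fresh log file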
open(logfile, 'w').close()
logFormatter = logging.Formatter(
'%(asctime)s %(module)s %(levelname)s %(message)s',
'%Y-%m-%d %H:%M:%S'
)
rootLogger = logging.getLogger()
fileHandler = logging.FileHandler(logfile)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
rootLogger.setLevel(logging.INFO)
scripts = [
'run',
'db_sql',
]
@run_with_locker(lockfile)
def main():
logging.info('# START')
for script_name in scripts:
logging.info('# START {}'.format(script_name))
script = importlib.import_module(script_name)
script.main()
logging.info('# FINISH {}'.format(script_name))
logging.info('# FINISH')
return
if __name__ == '__main__':
main()
| gpl-3.0 | -1,863,859,130,094,256,600 | 20.28 | 55 | 0.677632 | false |
watsonyanghx/CNN_LSTM_CTC_Tensorflow | main.py | 1 | 8188 | """
"""
import datetime
import logging
import os
import time
import cv2
import numpy as np
import tensorflow as tf
import cnn_lstm_otc_ocr
import utils
import helper
FLAGS = utils.FLAGS
logger = logging.getLogger('Traing for OCR using CNN+LSTM+CTC')
logger.setLevel(logging.INFO)
def train(train_dir=None, val_dir=None, mode='train'):
model = cnn_lstm_otc_ocr.LSTMOCR(mode)
model.build_graph()
print('loading train data')
train_feeder = utils.DataIterator(data_dir=train_dir)
print('size: ', train_feeder.size)
print('loading validation data')
val_feeder = utils.DataIterator(data_dir=val_dir)
print('size: {}\n'.format(val_feeder.size))
num_train_samples = train_feeder.size # 100000
num_batches_per_epoch = int(num_train_samples / FLAGS.batch_size) # example: 100000/100
num_val_samples = val_feeder.size
num_batches_per_epoch_val = int(num_val_samples / FLAGS.batch_size) # example: 10000/100
shuffle_idx_val = np.random.permutation(num_val_samples)
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)
if FLAGS.restore:
ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
if ckpt:
                # the global_step will be restored as well
saver.restore(sess, ckpt)
print('restore from checkpoint{0}'.format(ckpt))
print('=============================begin training=============================')
for cur_epoch in range(FLAGS.num_epochs):
shuffle_idx = np.random.permutation(num_train_samples)
train_cost = 0
start_time = time.time()
batch_time = time.time()
# the training part
for cur_batch in range(num_batches_per_epoch):
if (cur_batch + 1) % 100 == 0:
print('batch', cur_batch, ': time', time.time() - batch_time)
batch_time = time.time()
indexs = [shuffle_idx[i % num_train_samples] for i in
range(cur_batch * FLAGS.batch_size, (cur_batch + 1) * FLAGS.batch_size)]
batch_inputs, _, batch_labels = \
train_feeder.input_index_generate_batch(indexs)
# batch_inputs,batch_seq_len,batch_labels=utils.gen_batch(FLAGS.batch_size)
feed = {model.inputs: batch_inputs,
model.labels: batch_labels}
# if summary is needed
summary_str, batch_cost, step, _ = \
sess.run([model.merged_summay, model.cost, model.global_step, model.train_op], feed)
# calculate the cost
train_cost += batch_cost * FLAGS.batch_size
train_writer.add_summary(summary_str, step)
# save the checkpoint
if step % FLAGS.save_steps == 1:
if not os.path.isdir(FLAGS.checkpoint_dir):
os.mkdir(FLAGS.checkpoint_dir)
                    logger.info('save checkpoint at step {0}'.format(step))
saver.save(sess, os.path.join(FLAGS.checkpoint_dir, 'ocr-model'), global_step=step)
# train_err += the_err * FLAGS.batch_size
# do validation
if step % FLAGS.validation_steps == 0:
acc_batch_total = 0
lastbatch_err = 0
lr = 0
for j in range(num_batches_per_epoch_val):
indexs_val = [shuffle_idx_val[i % num_val_samples] for i in
range(j * FLAGS.batch_size, (j + 1) * FLAGS.batch_size)]
val_inputs, _, val_labels = \
val_feeder.input_index_generate_batch(indexs_val)
val_feed = {model.inputs: val_inputs,
model.labels: val_labels}
dense_decoded, lastbatch_err, lr = \
sess.run([model.dense_decoded, model.cost, model.lrn_rate],
val_feed)
# print the decode result
ori_labels = val_feeder.the_label(indexs_val)
acc = utils.accuracy_calculation(ori_labels, dense_decoded,
ignore_value=-1, isPrint=True)
acc_batch_total += acc
accuracy = (acc_batch_total * FLAGS.batch_size) / num_val_samples
avg_train_cost = train_cost / ((cur_batch + 1) * FLAGS.batch_size)
# train_err /= num_train_samples
now = datetime.datetime.now()
log = "{}/{} {}:{}:{} Epoch {}/{}, " \
"accuracy = {:.3f},avg_train_cost = {:.3f}, " \
"lastbatch_err = {:.3f}, time = {:.3f},lr={:.8f}"
print(log.format(now.month, now.day, now.hour, now.minute, now.second,
cur_epoch + 1, FLAGS.num_epochs, accuracy, avg_train_cost,
lastbatch_err, time.time() - start_time, lr))
def infer(img_path, mode='infer'):
# imgList = load_img_path('/home/yang/Downloads/FILE/ml/imgs/image_contest_level_1_validate/')
imgList = helper.load_img_path(img_path)
print(imgList[:5])
model = cnn_lstm_otc_ocr.LSTMOCR(mode)
model.build_graph()
    total_steps = len(imgList) // FLAGS.batch_size  # integer division so range() below receives an int
config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
if ckpt:
saver.restore(sess, ckpt)
print('restore from ckpt{}'.format(ckpt))
else:
print('cannot restore')
decoded_expression = []
for curr_step in range(total_steps):
imgs_input = []
seq_len_input = []
for img in imgList[curr_step * FLAGS.batch_size: (curr_step + 1) * FLAGS.batch_size]:
im = cv2.imread(img, 0).astype(np.float32) / 255.
im = np.reshape(im, [FLAGS.image_height, FLAGS.image_width, FLAGS.image_channel])
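                # every sample gets the same fixed sequence length (FLAGS.max_stepsize);
                # the lengths are collected below although only the images are fed to the model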
def get_input_lens(seqs):
length = np.array([FLAGS.max_stepsize for _ in seqs], dtype=np.int64)
return seqs, length
inp, seq_len = get_input_lens(np.array([im]))
imgs_input.append(im)
seq_len_input.append(seq_len)
imgs_input = np.asarray(imgs_input)
seq_len_input = np.asarray(seq_len_input)
seq_len_input = np.reshape(seq_len_input, [-1])
feed = {model.inputs: imgs_input}
dense_decoded_code = sess.run(model.dense_decoded, feed)
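            # map the decoded label indices back to characters; -1 entries are
            # padding introduced by the sparse-to-dense conversion and are skipped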
for item in dense_decoded_code:
expression = ''
for i in item:
if i == -1:
expression += ''
else:
expression += utils.decode_maps[i]
decoded_expression.append(expression)
with open('./result.txt', 'a') as f:
for code in decoded_expression:
f.write(code + '\n')
def main(_):
if FLAGS.num_gpus == 0:
dev = '/cpu:0'
elif FLAGS.num_gpus == 1:
dev = '/gpu:0'
else:
raise ValueError('Only support 0 or 1 gpu.')
with tf.device(dev):
if FLAGS.mode == 'train':
train(FLAGS.train_dir, FLAGS.val_dir, FLAGS.mode)
elif FLAGS.mode == 'infer':
infer(FLAGS.infer_dir, FLAGS.mode)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| mit | 4,733,250,009,559,625,000 | 37.805687 | 104 | 0.529433 | false |
Mystler/ShadowCraft-Engine | shadowcraft/objects/buffs.py | 1 | 8063 | from shadowcraft.core import exceptions
class InvalidBuffException(exceptions.InvalidInputException):
pass
class Buffs(object):
allowed_buffs = frozenset([
'short_term_haste_buff', # Heroism/Blood Lust, Time Warp
#'stat_multiplier_buff', # Mark of the Wild, Blessing of Kings, Legacy of the Emperor
        #'crit_chance_buff', # Leader of the Pack, Legacy of the White Tiger, Arcane Brilliance
#'haste_buff', # Swiftblade's Cunning, Unholy Aura
#'multistrike_buff', # Swiftblade's Cunning, ...
#'attack_power_buff', # Horn of Winter, Trueshot Aura, Battle Shout
#'mastery_buff', # Blessing of Might, Grace of Air
#'stamina_buff', # PW: Fortitude, Blood Pact, Commanding Shout
#'versatility_buff', #
        #'spell_power_buff', # Dark Intent, Arcane Brilliance
#'armor_debuff', # Sunder, Expose Armor, Faerie Fire
#'physical_vulnerability_debuff', # Brittle Bones, Ebon Plaguebringer, Judgments of the Bold, Colossus Smash
#'spell_damage_debuff', # Master Poisoner, Curse of Elements
#'slow_casting_debuff',
'mortal_wounds_debuff',
# consumables
'flask_wod_agi_200', #
'flask_wod_agi', # 250
'food_wod_mastery', # 100
'food_wod_mastery_75', # 75
'food_wod_mastery_125', # 125
'food_wod_crit', #
'food_wod_crit_75', #
'food_wod_crit_125', #
'food_wod_haste', #
'food_wod_haste_75', #
'food_wod_haste_125', #
'food_wod_versatility', #
'food_wod_versatility_75', #
'food_wod_versatility_125', #
'food_felmouth_frenzy', # Felmouth frenzy, 2 haste scaling RPPM dealing 0.424 AP in damage
###LEGION###
'flask_legion_agi', # Flask of the Seventh Demon
        'food_legion_mastery_225', # Pickled Stormray
'food_legion_crit_225', # Salt & Pepper Shank
'food_legion_haste_225', # Deep-Fried Mossgill
'food_legion_versatility_225', # Faronaar Fizz
        'food_legion_mastery_300', # Barracuda Mrglgagh
'food_legion_crit_300', # Leybeque Ribs
'food_legion_haste_300', # Suramar Surf and Turf
'food_legion_versatility_300', # Koi-Scented Stormray
'food_legion_mastery_375', # Nightborne Delicacy Platter
'food_legion_crit_375', # The Hungry Magister
'food_legion_haste_375', # Azshari Salad
'food_legion_versatility_375', # Seed-Battered Fish Plate
'food_legion_damage_1', # Spiced Rib Roast
'food_legion_damage_2', # Drogbar-Style Salmon
'food_legion_damage_3', # Fishbrul Special
'food_legion_feast_150',
'food_legion_feast_200',
])
buffs_debuffs = frozenset([
'short_term_haste_buff', # Heroism/Blood Lust, Time Warp
#'stat_multiplier_buff', # Mark of the Wild, Blessing of Kings, Legacy of the Emperor
        #'crit_chance_buff', # Leader of the Pack, Legacy of the White Tiger, Arcane Brilliance
#'haste_buff', # Swiftblade's Cunning, Unholy Aura
#'multistrike_buff', # Swiftblade's Cunning, ...
#'attack_power_buff', # Horn of Winter, Trueshot Aura, Battle Shout
#'mastery_buff', # Blessing of Might, Grace of Air
        #'spell_power_buff', # Dark Intent, Arcane Brilliance
#'versatility_buff',
#'stamina_buff', # PW: Fortitude, Blood Pact, Commanding Shout
#'physical_vulnerability_debuff', # Brittle Bones, Ebon Plaguebringer, Judgments of the Bold, Colossus Smash
#'spell_damage_debuff', # Master Poisoner, Curse of Elements
'mortal_wounds_debuff',
])
def __init__(self, *args, **kwargs):
for buff in args:
if buff not in self.allowed_buffs:
raise InvalidBuffException(_('Invalid buff {buff}').format(buff=buff))
setattr(self, buff, True)
def __getattr__(self, name):
# Any buff we haven't assigned a value to, we don't have.
if name in self.allowed_buffs:
return False
object.__getattribute__(self, name)
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
def get_stat_bonuses(self, epicurean=False):
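        # the epicurean flag (presumably the Pandaren "Epicurean" racial) doubles food
        # bonuses; it is passed down to each helper below as its "race" argument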
bonuses = {
'agi': self.buff_agi(epicurean),
'crit': self.buff_crit(epicurean),
'haste': self.buff_haste(epicurean),
'mastery': self.buff_mast(epicurean),
'versatility': self.buff_versatility(epicurean),
}
return bonuses
def buff_agi(self, race=False):
bonus_agi = 0
bonus_agi += 200 * self.flask_wod_agi_200
bonus_agi += 250 * self.flask_wod_agi
bonus_agi += 1300 * self.flask_legion_agi
bonus_agi += 150 * self.food_legion_feast_150 * [1, 2][race]
bonus_agi += 200 * self.food_legion_feast_200 * [1, 2][race]
return bonus_agi
def buff_haste(self, race=False):
bonus_haste = 0
bonus_haste += 125 * self.food_wod_haste_125 * [1, 2][race]
bonus_haste += 100 * self.food_wod_haste * [1, 2][race]
bonus_haste += 75 * self.food_wod_haste_75 * [1, 2][race]
bonus_haste += 225 * self.food_legion_haste_225 * [1, 2][race]
bonus_haste += 300 * self.food_legion_haste_300 * [1, 2][race]
bonus_haste += 375 * self.food_legion_haste_375 * [1, 2][race]
return bonus_haste
def buff_crit(self, race=False):
bonus_crit = 0
bonus_crit += 125 * self.food_wod_crit_125 * [1, 2][race]
bonus_crit += 100 * self.food_wod_crit * [1, 2][race]
bonus_crit += 75 * self.food_wod_crit_75 * [1, 2][race]
bonus_crit += 225 * self.food_legion_crit_225 * [1, 2][race]
bonus_crit += 300 * self.food_legion_crit_300 * [1, 2][race]
bonus_crit += 375 * self.food_legion_crit_375 * [1, 2][race]
return bonus_crit
def buff_mast(self, race=False):
bonus_mastery = 0
bonus_mastery += 125 * self.food_wod_mastery_125 * [1, 2][race]
bonus_mastery += 100 * self.food_wod_mastery * [1, 2][race]
bonus_mastery += 75 * self.food_wod_mastery_75 * [1, 2][race]
bonus_mastery += 225 * self.food_legion_mastery_225 * [1, 2][race]
bonus_mastery += 300 * self.food_legion_mastery_300 * [1, 2][race]
bonus_mastery += 375 * self.food_legion_mastery_375 * [1, 2][race]
return bonus_mastery
def buff_versatility(self, race=False):
bonus_versatility = 0
bonus_versatility += 125 * self.food_wod_versatility_125 * [1, 2][race]
bonus_versatility += 100 * self.food_wod_versatility * [1, 2][race]
bonus_versatility += 75 * self.food_wod_versatility_75 * [1, 2][race]
bonus_versatility += 225 * self.food_legion_versatility_225 * [1, 2][race]
bonus_versatility += 300 * self.food_legion_versatility_300 * [1, 2][race]
bonus_versatility += 375 * self.food_legion_versatility_375 * [1, 2][race]
return bonus_versatility
def felmouth_food(self):
if self.food_felmouth_frenzy :
return True
return False
def damage_food(self):
if self.food_legion_damage_1:
return 1
if self.food_legion_damage_2:
return 2
if self.food_legion_damage_3:
return 3
return 0
| lgpl-3.0 | 4,676,780,736,006,268,000 | 48.466258 | 119 | 0.546199 | false |
acuros/noopy | noopy/deployer/apigateway.py | 1 | 5571 | import importlib
import uuid
import boto3
from botocore.exceptions import ClientError
from noopy import settings
from noopy.endpoint import Endpoint, Resource
class ApiGatewayDeployer(object):
def __init__(self, function_arn, stage):
self.function_arn = function_arn
self.stage = stage
self.client = boto3.client('apigateway')
apis = self.client.get_rest_apis()['items']
filtered_apis = [api for api in apis if api['name'] == settings.PROJECT_NAME]
if filtered_apis:
self.api_id = filtered_apis[0]['id']
else:
self.api_id = self.client.create_rest_api(name=settings.PROJECT_NAME)['id']
self.aws_resources = self.client.get_resources(restApiId=self.api_id, limit=500)['items']
@property
def function_uri(self):
return 'arn:aws:apigateway:{}:lambda:path/2015-03-31/functions/{}/invocations'.format(
self.client._client_config.region_name,
self.function_arn
)
def deploy(self, dir_):
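        # deployment flow: import the lambda modules so endpoints register themselves,
        # grant API Gateway permission to invoke the function, then create the
        # resources, methods and finally the stage, returning the invoke URL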
self._discover_endpoints()
if not Endpoint.endpoints:
return
self.add_permision()
self.deploy_resources()
self.deploy_methods()
self.deploy_stage()
return 'https://{}.execute-api.{}.amazonaws.com/{}'.format(
self.api_id,
self.client._client_config.region_name,
self.stage
)
def deploy_resources(self):
aws_resources = self.client.get_resources(restApiId=self.api_id, limit=500)['items']
aws_resource_by_path = dict((r['path'], r) for r in aws_resources)
for path, noopy_resource in Resource.resources.iteritems():
aws_resource = aws_resource_by_path.get(path)
if aws_resource:
noopy_resource.id = aws_resource['id']
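        # create any resources that exist locally but not yet in API Gateway,
        # walking the resource tree down from the root path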
self.create_omitted_resources(set(aws_resource_by_path.keys()), Resource.resources['/'])
def deploy_methods(self):
resources = self.client.get_resources(restApiId=self.api_id, limit=500)['items']
resources_by_path = dict((r['path'], r) for r in resources)
for endpoint, func in Endpoint.endpoints.iteritems():
method = str(endpoint.method)
resource = resources_by_path.get(endpoint.path)
if method in resource.get('resourceMethods', {}):
self._update_integration(resource, method)
else:
self._put_method(resource, method)
def _update_integration(self, resource, method):
self.client.update_integration(
restApiId=self.api_id,
resourceId=resource['id'],
httpMethod=method,
patchOperations=[
{
'op': 'replace',
'path': '/uri',
'value': self.function_uri
}
]
)
def _put_method(self, resource, method):
self.client.put_method(
restApiId=self.api_id,
resourceId=resource['id'],
httpMethod=method,
authorizationType=''
)
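        # mapping template: packages the resource path, HTTP method and JSON body
        # into the event payload passed to the Lambda function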
template = '{"path": "$context.resourcePath", "method": "$context.httpMethod",' \
'"params": $input.json(\'$\'), "type": "APIGateway"}'
self.client.put_integration(
restApiId=self.api_id,
resourceId=resource['id'],
httpMethod=method,
integrationHttpMethod='POST',
requestTemplates={
'application/json': template
},
type='AWS',
uri=self.function_uri,
)
self.client.put_method_response(
restApiId=self.api_id,
resourceId=resource['id'],
httpMethod=method,
statusCode='200',
responseModels={
'application/json': 'Empty'
}
)
self.client.put_integration_response(
restApiId=self.api_id,
resourceId=resource['id'],
httpMethod=method,
statusCode='200',
selectionPattern=''
)
def add_permision(self):
lambda_client = boto3.client('lambda')
source_arn = 'arn:aws:execute-api:{}:{}:{}/*/*/*'.format(
self.client._client_config.region_name,
settings.ACCOUNT_ID,
self.api_id
)
try:
lambda_client.add_permission(
FunctionName=self.function_arn,
StatementId=str(uuid.uuid1()),
Action='lambda:InvokeFunction',
Principal='apigateway.amazonaws.com',
SourceArn=source_arn
)
except ClientError:
pass
def deploy_stage(self):
self.client.create_deployment(restApiId=self.api_id, stageName=self.stage)
def create_omitted_resources(self, exist_path, parent):
for child in parent.children:
if child.path not in exist_path:
created = self.client.create_resource(
restApiId=self.api_id,
parentId=parent.id,
pathPart=child.path.split('/')[-1]
)
exist_path.add(child.path)
self.aws_resources.append(created)
child.id = created['id']
if child.children:
self.create_omitted_resources(exist_path, child)
@staticmethod
def _discover_endpoints():
for lambda_module in settings.LAMBDA_MODULES:
importlib.import_module(lambda_module) | mit | 5,562,871,211,197,979,000 | 34.265823 | 97 | 0.55771 | false |
pnisarg/ABSA | tools/hindi_wordnet_python/demo.py | 1 | 1955 | # -*- coding: utf-8 -*-
"""
Demo program of Hindi WordNet in Python.
Here I demonstrate all the functionality of the libraries. Note that you can load only the pickle files necessary for your task rather than loading every pickle file. Loading pickle files takes time and memory, but once loaded, every WordNet operation is O(1), so WordNet lookup is no longer a bottleneck.
Developer: Siva Reddy <[email protected]>
Please point others to http://sivareddy.in/downloads so they can find these Python libraries.
"""
import pickle
word2Synset = pickle.load(open("WordSynsetDict.pk"))
synset2Onto = pickle.load(open("SynsetOnto.pk"))
synonyms = pickle.load(open("SynsetWords.pk"))
synset2Gloss = pickle.load(open("SynsetGloss.pk"))
synset2Hypernyms = pickle.load(open("SynsetHypernym.pk"))
synset2Hyponyms = pickle.load(open("SynsetHyponym.pk"))
synset2Hypernyms = pickle.load(open("SynsetHypernym.pk"))
word = "खाना".decode('utf-8', 'ignore')
while True:
if word2Synset.has_key(word):
synsets = word2Synset[word]
print "Word -->", "खाना "
for pos in synsets.keys():
print "POS Category -->", pos
for synset in synsets[pos]:
print "\t\tSynset -->", synset
if synonyms.has_key(synset):
print "\t\t\t Synonyms -->", synonyms[synset]
if synset2Gloss.has_key(synset):
print "\t\t\t Synset Gloss", synset2Gloss[synset]
if synset2Onto.has_key(synset):
print "\t\t\t Ontological Categories", synset2Onto[synset]
if synset2Hypernyms.has_key(synset):
print "\t\t\t Hypernym Synsets", synset2Hypernyms[synset]
if synset2Hyponyms.has_key(synset):
print "\t\t\t Hyponym Synsets", synset2Hyponyms[synset]
word = raw_input("Enter a word: ").decode("utf-8", "ignore")
| mit | 7,751,544,496,335,239,000 | 45.166667 | 342 | 0.649819 | false |
paulmartel/voltdb | tests/scripts/examples/sql_coverage/config.py | 1 | 7780 | #!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2016 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
{
"basic-compoundex": {"schema": "schema.py",
"ddl": "compoundex-DDL.sql",
"template": "compound.sql",
"normalizer": "nulls-lowest-normalizer.py"},
# BIGINT OVERFLOW CAUSES FAILURES IN THIS SUITE, DISABLING
    # also, the generator fails to generate statements for:
# Template "SELECT * FROM _table WHERE (_variable _cmp _value[int64]) _logic (_variable _cmp _variable)" failed to yield SQL statements
# Template "UPDATE _table SET BIG = _value[int64] WHERE (_variable _cmp _variable) _logic (_variable _cmp _value[int64])" failed to yield SQL statements
# Template "DELETE FROM _table WHERE (_variable _cmp _variable) _logic (_variable _cmp _value[int64])" failed to yield SQL statements
# because there are insufficient columns of the same type to satisfy all the _variables
# given how the generator works.
"basic-ints": {"schema": "int-schema.py",
"ddl": "int-DDL.sql",
"template": "basic-ints.sql",
"normalizer": "normalizer.py"},
# HSQL SEEMS TO HAVE A BAD DEFAULT PRECISION, DISABLING
# "basic-decimal": {"schema": "decimal-schema.py",
# "ddl": "DDL.sql",
# "template": "basic-decimal.sql",
# "normalizer": "normalizer.py"},
# Floating point rounding differences lead to deltas
# "basic-timestamp": {"schema": "timestamp-schema.py",
# "ddl": "timestamp-DDL.sql",
# "template": "basic-timestamp.sql",
# "normalizer": "normalizer.py"},
# BIGINT OVERFLOW CAUSES FAILURES IN THIS SUITE
# also, the generator fails to generate statements for:
# Template "UPDATE _table SET BIG = _value[int64] WHERE (_variable _cmp _variable) _logic (_variable _cmp _value[int64])" failed to yield SQL statements
# Template "DELETE FROM _table WHERE (_variable _cmp _variable) _logic (_variable _cmp _value[int64])" failed to yield SQL statements
# "basic-matview": {"schema": "matview-schema.py",
# "ddl": "int-DDL.sql",
# "template": "basic-matview.sql",
# "normalizer": "normalizer.py"},
"joined-matview-default-full": {"schema": "joined-matview-schema.py",
"ddl": "joined-matview-DDL.sql",
"template": "joined-matview-default-full.sql",
"normalizer": "normalizer.py"},
"joined-matview-int": {"schema": "joined-matview-int-schema.py",
"ddl": "joined-matview-int-DDL.sql",
"template": "joined-matview-int.sql",
"normalizer": "nulls-lowest-normalizer.py"},
"joined-matview-string": {"schema": "joined-matview-string-schema.py",
"ddl": "joined-matview-string-DDL.sql",
"template": "joined-matview-string.sql",
"normalizer": "normalizer.py"},
"joined-matview-timestamp": {"schema": "joined-matview-timestamp-schema.py",
"ddl": "joined-matview-timestamp-DDL.sql",
"template": "joined-matview-timestamp.sql",
"normalizer": "normalizer.py"},
"basic-joins": {"schema": "schema.py",
"ddl": "DDL.sql",
"template": "basic-joins.sql",
"normalizer": "normalizer.py"},
"basic-index-joins": {"schema": "schema.py",
"ddl": "index-DDL.sql",
"template": "basic-joins.sql",
"normalizer": "normalizer.py"},
"basic-index-joins_extra": {"schema": "index-join-schema.py",
"ddl": "index-DDL.sql",
"template": "basic-index-joins.sql",
"normalizer": "normalizer.py"},
"basic-compoundex-joins": {"schema": "schema.py",
"ddl": "compoundex-DDL.sql",
"template": "basic-joins.sql",
"normalizer": "normalizer.py"},
# TODO: Need to scale down precision of values to keep HSQL happy even after math
"numeric-decimals": {"schema": "decimal-schema.py",
"ddl": "DDL.sql",
"template": "numeric-decimals.sql",
"normalizer": "normalizer.py",
"precision": "9"},
"numeric-ints": {"schema": "int-schema.py",
"ddl": "int-DDL.sql",
"template": "numeric-ints.sql",
"normalizer": "normalizer.py",
"precision": "9"},
# HSQL SEEMS TO HAVE A BAD DEFAULT PRECISION, DISABLING
# "advanced-decimal": {"schema": "decimal-schema.py",
# "ddl": "DDL.sql",
# "template": "advanced-decimal.sql",
# "normalizer": "normalizer.py"},
"advanced-joins": {"schema": "schema.py",
"ddl": "DDL.sql",
"template": "advanced-joins.sql",
"normalizer": "nulls-lowest-normalizer.py"},
"advanced-index-joins": {"schema": "schema.py",
"ddl": "index-DDL.sql",
"template": "advanced-joins.sql",
"normalizer": "nulls-lowest-normalizer.py"},
"advanced-subq-joins": {"schema": "schema.py",
"ddl": "DDL.sql",
"template": "advanced-subq-joins.sql",
"normalizer": "nulls-lowest-normalizer.py"},
"advanced-subq-part-joins": {"schema": "schema.py",
"ddl": "subq-part-DDL.sql",
"template": "advanced-subq-part-joins.sql",
"normalizer": "normalizer.py"},
"advanced-compoundex-joins": {"schema": "schema.py",
"ddl": "compoundex-DDL.sql",
"template": "advanced-joins.sql",
"normalizer": "nulls-lowest-normalizer.py"},
"advanced-matview-subq-nonjoin": {"schema": "matview-advanced-nonjoin-schema.py",
"ddl": "matview-DDL.sql",
"template": "advanced-matview-subq-nonjoin.sql",
"normalizer": "nulls-lowest-normalizer.py"},
}
| agpl-3.0 | -4,743,790,034,531,805,000 | 57.939394 | 152 | 0.536247 | false |
p2pu/learning-circles | studygroups/migrations/0001_initial.py | 1 | 2548 | # -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=128)),
('provider', models.CharField(max_length=256)),
('link', models.URLField()),
('start_date', models.DateField()),
('duration', models.CharField(max_length=128)),
('prerequisite', models.TextField()),
('time_required', models.CharField(max_length=128)),
('caption', models.TextField()),
('description', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StudyGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('location', models.CharField(max_length=128)),
('location_link', models.URLField()),
('time', models.CharField(max_length=128)),
('max_size', models.IntegerField()),
('description', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('course', models.ForeignKey(to='studygroups.Course', on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StudyGroupSignup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('username', models.CharField(max_length=128)),
('email', models.EmailField(max_length=75)),
('created_at', models.DateTimeField(auto_now_add=True)),
('study_group', models.ForeignKey(to='studygroups.StudyGroup', on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='studygroupsignup',
unique_together=set([('email', 'study_group')]),
),
]
| mit | 1,566,064,058,333,559,000 | 38.2 | 114 | 0.517661 | false |
Eric89GXL/vispy | vispy/geometry/calculations.py | 1 | 4246 | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""Miscellaneous functions
"""
import numpy as np
###############################################################################
# These fast normal calculation routines are adapted from mne-python
def _fast_cross_3d(x, y):
"""Compute cross product between list of 3D vectors
Much faster than np.cross() when the number of cross products
becomes large (>500). This is because np.cross() methods become
less memory efficient at this stage.
Parameters
----------
x : array
Input array 1.
y : array
Input array 2.
Returns
-------
z : array
Cross product of x and y.
Notes
-----
x and y must both be 2D row vectors. One must have length 1, or both
lengths must match.
"""
assert x.ndim == 2
assert y.ndim == 2
assert x.shape[1] == 3
assert y.shape[1] == 3
assert (x.shape[0] == 1 or y.shape[0] == 1) or x.shape[0] == y.shape[0]
if max([x.shape[0], y.shape[0]]) >= 500:
return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
else:
return np.cross(x, y)
def _calculate_normals(rr, tris):
"""Efficiently compute vertex normals for triangulated surface"""
# ensure highest precision for our summation/vectorization "trick"
rr = rr.astype(np.float64)
# first, compute triangle normals
r1 = rr[tris[:, 0], :]
r2 = rr[tris[:, 1], :]
r3 = rr[tris[:, 2], :]
tri_nn = _fast_cross_3d((r2 - r1), (r3 - r1))
# Triangle normals and areas
size = np.sqrt(np.sum(tri_nn * tri_nn, axis=1))
size[size == 0] = 1.0 # prevent ugly divide-by-zero
tri_nn /= size[:, np.newaxis]
npts = len(rr)
# the following code replaces this, but is faster (vectorized):
#
# for p, verts in enumerate(tris):
# nn[verts, :] += tri_nn[p, :]
#
nn = np.zeros((npts, 3))
for verts in tris.T: # note this only loops 3x (number of verts per tri)
for idx in range(3): # x, y, z
nn[:, idx] += np.bincount(verts.astype(np.int32),
tri_nn[:, idx], minlength=npts)
size = np.sqrt(np.sum(nn * nn, axis=1))
size[size == 0] = 1.0 # prevent ugly divide-by-zero
nn /= size[:, np.newaxis]
return nn
def resize(image, shape, kind='linear'):
"""Resize an image
Parameters
----------
image : ndarray
Array of shape (N, M, ...).
shape : tuple
2-element shape.
kind : str
Interpolation, either "linear" or "nearest".
Returns
-------
scaled_image : ndarray
New image, will have dtype np.float64.
"""
image = np.array(image, float)
shape = np.array(shape, int)
if shape.ndim != 1 or shape.size != 2:
raise ValueError('shape must have two elements')
if image.ndim < 2:
raise ValueError('image must have two dimensions')
if not isinstance(kind, str) or kind not in ('nearest', 'linear'):
        raise ValueError('kind must be "nearest" or "linear"')
r = np.linspace(0, image.shape[0] - 1, shape[0])
c = np.linspace(0, image.shape[1] - 1, shape[1])
if kind == 'linear':
r_0 = np.floor(r).astype(int)
c_0 = np.floor(c).astype(int)
r_1 = r_0 + 1
c_1 = c_0 + 1
top = (r_1 - r)[:, np.newaxis]
bot = (r - r_0)[:, np.newaxis]
lef = (c - c_0)[np.newaxis, :]
rig = (c_1 - c)[np.newaxis, :]
c_1 = np.minimum(c_1, image.shape[1] - 1)
r_1 = np.minimum(r_1, image.shape[0] - 1)
for arr in (top, bot, lef, rig):
arr.shape = arr.shape + (1,) * (image.ndim - 2)
out = top * rig * image[r_0][:, c_0, ...]
out += bot * rig * image[r_1][:, c_0, ...]
out += top * lef * image[r_0][:, c_1, ...]
out += bot * lef * image[r_1][:, c_1, ...]
else: # kind == 'nearest'
r = np.round(r).astype(int)
c = np.round(c).astype(int)
out = image[r][:, c, ...]
return out
| bsd-3-clause | -4,810,434,838,505,294,000 | 30.451852 | 79 | 0.521432 | false |
epfl-lts2/pygsp | pygsp/graphs/randomring.py | 1 | 2637 | # -*- coding: utf-8 -*-
import numpy as np
from scipy import sparse
from pygsp import utils
from . import Graph # prevent circular import in Python < 3.5
class RandomRing(Graph):
r"""Ring graph with randomly sampled vertices.
Parameters
----------
N : int
Number of vertices.
angles : array_like, optional
The angular coordinate, in :math:`[0, 2\pi]`, of the vertices.
seed : int
Seed for the random number generator (for reproducible graphs).
Examples
--------
>>> import matplotlib.pyplot as plt
>>> G = graphs.RandomRing(N=10, seed=42)
>>> fig, axes = plt.subplots(1, 2)
>>> _ = axes[0].spy(G.W)
>>> _ = G.plot(ax=axes[1])
>>> _ = axes[1].set_xlim(-1.1, 1.1)
>>> _ = axes[1].set_ylim(-1.1, 1.1)
"""
def __init__(self, N=64, angles=None, seed=None, **kwargs):
self.seed = seed
if angles is None:
rs = np.random.RandomState(seed)
angles = np.sort(rs.uniform(0, 2*np.pi, size=N), axis=0)
else:
angles = np.asanyarray(angles)
angles.sort() # Need to be sorted to take the difference.
N = len(angles)
if np.any(angles < 0) or np.any(angles >= 2*np.pi):
raise ValueError('Angles should be in [0, 2 pi]')
self.angles = angles
if N < 3:
# Asymmetric graph needed for 2 as 2 distances connect them.
raise ValueError('There should be at least 3 vertices.')
rows = range(0, N-1)
cols = range(1, N)
weights = np.diff(angles)
# Close the loop.
rows = np.concatenate((rows, [0]))
cols = np.concatenate((cols, [N-1]))
weights = np.concatenate((weights, [2*np.pi + angles[0] - angles[-1]]))
W = sparse.coo_matrix((weights, (rows, cols)), shape=(N, N))
W = utils.symmetrize(W, method='triu')
# Width as the expected angle. All angles are equal to that value when
# the ring is uniformly sampled.
width = 2 * np.pi / N
assert (W.data.mean() - width) < 1e-10
# TODO: why this kernel ? It empirically produces eigenvectors closer
# to the sines and cosines.
W.data = width / W.data
coords = np.stack([np.cos(angles), np.sin(angles)], axis=1)
plotting = {'limits': np.array([-1, 1, -1, 1])}
# TODO: save angle and 2D position as graph signals
super(RandomRing, self).__init__(W, coords=coords, plotting=plotting,
**kwargs)
def _get_extra_repr(self):
return dict(seed=self.seed)
| bsd-3-clause | 6,646,018,641,626,723,000 | 31.555556 | 79 | 0.555556 | false |
tkolhar/robottelo | tests/foreman/cli/test_location.py | 1 | 20571 | # -*- encoding: utf-8 -*-
"""Test class for Location CLI"""
from fauxfactory import gen_string
from random import randint
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.factory import (
CLIFactoryError,
make_compute_resource,
make_domain,
make_environment,
make_hostgroup,
make_location,
make_medium,
make_subnet,
make_template,
make_user,
)
from robottelo.cli.location import Location
from robottelo.datafactory import invalid_values_list
from robottelo.decorators import skip_if_bug_open, tier1
from robottelo.test import CLITestCase
def valid_loc_data_list():
"""List of valid data for input testing.
    Note: The maximum allowed length of a location name is 246 characters. This
    is intended behavior (255 is the standard across other entities).
    """
"""
return [
gen_string('alphanumeric', randint(1, 246)),
gen_string('alpha', randint(1, 246)),
gen_string('cjk', randint(1, 85)),
gen_string('latin1', randint(1, 246)),
gen_string('numeric', randint(1, 246)),
gen_string('utf8', randint(1, 85)),
gen_string('html', randint(1, 85)),
]
class LocationTestCase(CLITestCase):
"""Tests for Location via Hammer CLI"""
# TODO Add coverage for smart_proxy and realm once we can create ssh tunnel
@tier1
def test_positive_create_with_name(self):
"""@Test: Try to create location using different value types as a name
@Feature: Location
@Assert: Location is created successfully and has proper name
"""
for name in valid_loc_data_list():
with self.subTest(name):
loc = make_location({'name': name})
self.assertEqual(loc['name'], name)
@skip_if_bug_open('bugzilla', 1233612)
@tier1
def test_positive_create_with_description(self):
"""@Test: Create new location with custom description
@Feature: Location
@Assert: Location created successfully and has expected and correct
description
"""
description = gen_string('utf8')
loc = make_location({'description': description})
self.assertEqual(loc['description'], description)
@tier1
def test_positive_create_with_user_by_id(self):
"""@Test: Create new location with assigned user to it. Use user id as
a parameter
@Feature: Location
@Assert: Location created successfully and has correct user assigned to
it with expected login name
"""
user = make_user()
loc = make_location({'user-ids': user['id']})
self.assertEqual(loc['users'][0], user['login'])
@tier1
def test_positive_create_with_user_by_name(self):
"""@Test: Create new location with assigned user to it. Use user login
as a parameter
@Feature: Location
@Assert: Location created successfully and has correct user assigned to
it with expected login name
"""
user = make_user()
loc = make_location({'users': user['login']})
self.assertEqual(loc['users'][0], user['login'])
@tier1
def test_positive_create_with_compresource_by_id(self):
"""@Test: Create new location with compute resource assigned to it. Use
compute resource id as a parameter
@Feature: Location
@Assert: Location created successfully and has correct compute resource
assigned to it
"""
comp_resource = make_compute_resource()
loc = make_location({'compute-resource-ids': comp_resource['id']})
self.assertEqual(loc['compute-resources'][0], comp_resource['name'])
@tier1
def test_positive_create_with_compresource_by_name(self):
"""@Test: Create new location with compute resource assigned to it. Use
compute resource name as a parameter
@Feature: Location
@Assert: Location created successfully and has correct compute resource
assigned to it
"""
comp_resource = make_compute_resource()
loc = make_location({'compute-resources': comp_resource['name']})
self.assertEqual(loc['compute-resources'][0], comp_resource['name'])
@tier1
def test_positive_create_with_template_by_id(self):
"""@Test: Create new location with config template assigned to it. Use
config template id as a parameter
@Feature: Location
@Assert: Location created successfully and list of config templates
assigned to that location should contain expected one
"""
template = make_template()
loc = make_location({'config-template-ids': template['id']})
self.assertGreaterEqual(len(loc['templates']), 1)
self.assertIn(
u'{0} ({1})'. format(template['name'], template['type']),
loc['templates']
)
@tier1
def test_positive_create_with_template_by_name(self):
"""@Test: Create new location with config template assigned to it. Use
config template name as a parameter
@Feature: Location
@Assert: Location created successfully and list of config templates
assigned to that location should contain expected one
"""
template = make_template()
loc = make_location({'config-templates': template['name']})
self.assertGreaterEqual(len(loc['templates']), 1)
self.assertIn(
u'{0} ({1})'. format(template['name'], template['type']),
loc['templates']
)
@tier1
def test_positive_create_with_domain_by_id(self):
"""@Test: Create new location with assigned domain to it. Use domain id
as a parameter
@Feature: Location
@Assert: Location created successfully and has correct and expected
domain assigned to it
"""
domain = make_domain()
loc = make_location({'domain-ids': domain['id']})
self.assertEqual(loc['domains'][0], domain['name'])
@tier1
def test_positive_create_with_domain_by_name(self):
"""@Test: Create new location with assigned domain to it. Use domain
name as a parameter
@Feature: Location
@Assert: Location created successfully and has correct and expected
domain assigned to it
"""
domain = make_domain()
loc = make_location({'domains': domain['name']})
self.assertEqual(loc['domains'][0], domain['name'])
@tier1
def test_positive_create_with_subnet_by_id(self):
"""@Test: Create new location with assigned subnet to it. Use subnet id
as a parameter
@Feature: Location
@Assert: Location created successfully and has correct subnet with
expected network address assigned to it
"""
subnet = make_subnet()
loc = make_location({'subnet-ids': subnet['id']})
self.assertIn(subnet['name'], loc['subnets'][0])
self.assertIn(subnet['network'], loc['subnets'][0])
@tier1
def test_positive_create_with_subnet_by_name(self):
"""@Test: Create new location with assigned subnet to it. Use subnet
name as a parameter
@Feature: Location
@Assert: Location created successfully and has correct subnet with
expected network address assigned to it
"""
subnet = make_subnet()
loc = make_location({'subnets': subnet['name']})
self.assertIn(subnet['name'], loc['subnets'][0])
self.assertIn(subnet['network'], loc['subnets'][0])
@tier1
def test_positive_create_with_environment_by_id(self):
"""@Test: Create new location with assigned environment to it. Use
environment id as a parameter
@Feature: Location
@Assert: Location created successfully and has correct and expected
environment assigned to it
"""
env = make_environment()
loc = make_location({'environment-ids': env['id']})
self.assertEqual(loc['environments'][0], env['name'])
@tier1
def test_positive_create_with_environment_by_name(self):
"""@Test: Create new location with assigned environment to it. Use
environment name as a parameter
@Feature: Location
@Assert: Location created successfully and has correct and expected
environment assigned to it
"""
env = make_environment()
loc = make_location({'environments': env['name']})
self.assertEqual(loc['environments'][0], env['name'])
@tier1
def test_positive_create_with_hostgroup_by_id(self):
"""@Test: Create new location with assigned host group to it. Use host
group id as a parameter
@Feature: Location
@Assert: Location created successfully and has correct and expected
host group assigned to it
"""
host_group = make_hostgroup()
loc = make_location({'hostgroup-ids': host_group['id']})
self.assertEqual(loc['hostgroups'][0], host_group['name'])
@tier1
def test_positive_create_with_hostgroup_by_name(self):
"""@Test: Create new location with assigned host group to it. Use host
group name as a parameter
@Feature: Location
@Assert: Location created successfully and has correct and expected
host group assigned to it
"""
host_group = make_hostgroup()
loc = make_location({'hostgroups': host_group['name']})
self.assertEqual(loc['hostgroups'][0], host_group['name'])
@skip_if_bug_open('bugzilla', 1234287)
@tier1
def test_positive_create_with_medium(self):
"""@Test: Create new location with assigned media to it.
@Feature: Location
@Assert: Location created successfully and has correct and expected
media assigned to it
"""
medium = make_medium()
loc = make_location({'medium-ids': medium['id']})
self.assertGreater(len(loc['installation-media']), 0)
self.assertEqual(loc['installation-media'][0], medium['name'])
@tier1
def test_positive_create_with_environments_by_id(self):
"""@Test: Basically, verifying that location with multiple entities
assigned to it by id can be created in the system. Environments were
chosen for that purpose.
@Feature: Location
@Assert: Location created successfully and has correct environments
assigned to it
"""
envs_amount = randint(3, 5)
envs = [make_environment() for _ in range(envs_amount)]
loc = make_location({'environment-ids': [env['id'] for env in envs]})
self.assertEqual(len(loc['environments']), envs_amount)
for env in envs:
self.assertIn(env['name'], loc['environments'])
@tier1
def test_positive_create_with_domains_by_name(self):
"""@Test: Basically, verifying that location with multiple entities
assigned to it by name can be created in the system. Domains were
chosen for that purpose.
@Feature: Location
@Assert: Location created successfully and has correct domains assigned
to it
"""
domains_amount = randint(3, 5)
domains = [make_domain() for _ in range(domains_amount)]
loc = make_location({
'domains': [domain['name'] for domain in domains],
})
self.assertEqual(len(loc['domains']), domains_amount)
for domain in domains:
self.assertIn(domain['name'], loc['domains'])
@tier1
def test_negative_create_with_name(self):
"""@Test: Try to create location using invalid names only
@Feature: Location
@Assert: Location is not created
"""
for invalid_name in invalid_values_list():
with self.subTest(invalid_name):
with self.assertRaises(CLIFactoryError):
make_location({'name': invalid_name})
@tier1
def test_negative_create_with_same_name(self):
"""@Test: Try to create location using same name twice
@Feature: Location
@Assert: Second location is not created
"""
name = gen_string('utf8')
loc = make_location({'name': name})
self.assertEqual(loc['name'], name)
with self.assertRaises(CLIFactoryError):
make_location({'name': name})
@tier1
def test_negative_create_with_compresource_by_id(self):
"""@Test: Try to create new location with incorrect compute resource
assigned to it. Use compute resource id as a parameter
@Feature: Location
@Assert: Location is not created
"""
with self.assertRaises(CLIFactoryError):
make_location({'compute-resource-ids': gen_string('numeric', 6)})
@tier1
def test_negative_create_with_user_by_name(self):
"""@Test: Try to create new location with incorrect user assigned to it
Use user login as a parameter
@Feature: Location
@Assert: Location is not created
"""
with self.assertRaises(CLIFactoryError):
make_location({'users': gen_string('utf8', 80)})
@tier1
def test_positive_update_with_name(self):
"""@Test: Try to update location using different value types as a name
@Feature: Location
@Assert: Location is updated successfully and has proper and expected
name
"""
loc = make_location()
for new_name in valid_loc_data_list():
with self.subTest(new_name):
Location.update({
'id': loc['id'],
'new-name': new_name,
})
loc = Location.info({'id': loc['id']})
self.assertEqual(loc['name'], new_name)
@tier1
def test_positive_update_with_user_by_id(self):
"""@Test: Create new location with assigned user to it. Try to update
that location and change assigned user on another one. Use user id as a
parameter
@Feature: Location
@Assert: Location is updated successfully and has correct user assigned
to it
"""
user = [make_user() for _ in range(2)]
loc = make_location({'user-ids': user[0]['id']})
self.assertEqual(loc['users'][0], user[0]['login'])
Location.update({
'id': loc['id'],
'user-ids': user[1]['id'],
})
loc = Location.info({'id': loc['id']})
self.assertEqual(loc['users'][0], user[1]['login'])
@tier1
def test_positive_update_with_subnet_by_name(self):
"""@Test: Create new location with assigned subnet to it. Try to update
that location and change assigned subnet on another one. Use subnet
name as a parameter
@Feature: Location
@Assert: Location is updated successfully and has correct subnet with
expected network address assigned to it
"""
subnet = [make_subnet() for _ in range(2)]
loc = make_location({'subnets': subnet[0]['name']})
self.assertIn(subnet[0]['name'], loc['subnets'][0])
self.assertIn(subnet[0]['network'], loc['subnets'][0])
Location.update({
'id': loc['id'],
'subnets': subnet[1]['name'],
})
loc = Location.info({'id': loc['id']})
self.assertIn(subnet[1]['name'], loc['subnets'][0])
self.assertIn(subnet[1]['network'], loc['subnets'][0])
@tier1
def test_positive_update_from_compresources_to_compresource(self):
"""@Test: Create location with multiple (not less than three) compute
resources assigned to it. Try to update location and overwrite all
compute resources with a new single compute resource. Use compute
resource id as a parameter
@Feature: Location
@Assert: Location updated successfully and has correct compute resource
assigned to it
"""
resources_amount = randint(3, 5)
resources = [make_compute_resource() for _ in range(resources_amount)]
loc = make_location({
'compute-resource-ids': [resource['id'] for resource in resources],
})
self.assertEqual(len(loc['compute-resources']), resources_amount)
for resource in resources:
self.assertIn(resource['name'], loc['compute-resources'])
new_resource = make_compute_resource()
Location.update({
'compute-resource-ids': new_resource['id'],
'id': loc['id'],
})
loc = Location.info({'id': loc['id']})
self.assertEqual(len(loc['compute-resources']), 1)
self.assertEqual(loc['compute-resources'][0], new_resource['name'])
@tier1
def test_positive_update_from_hostgroups_to_hostgroups(self):
"""@Test: Create location with multiple (three) host groups assigned to
it. Try to update location and overwrite all host groups by new
multiple (two) host groups. Use host groups name as a parameter
@Feature: Location
@Assert: Location updated successfully and has correct and expected
host groups assigned to it
"""
host_groups = [make_hostgroup() for _ in range(3)]
loc = make_location({
'hostgroups': [hg['name'] for hg in host_groups],
})
self.assertEqual(len(loc['hostgroups']), 3)
for host_group in host_groups:
self.assertIn(host_group['name'], loc['hostgroups'])
new_host_groups = [make_hostgroup() for _ in range(2)]
Location.update({
'hostgroups': [hg['name'] for hg in new_host_groups],
'id': loc['id'],
})
loc = Location.info({'id': loc['id']})
self.assertEqual(len(loc['hostgroups']), 2)
for host_group in new_host_groups:
self.assertIn(host_group['name'], loc['hostgroups'])
@tier1
def test_negative_update_with_name(self):
"""@Test: Try to update location using invalid names only
@Feature: Location
@Assert: Location is not updated
"""
for invalid_name in invalid_values_list():
with self.subTest(invalid_name):
loc = make_location()
with self.assertRaises(CLIReturnCodeError):
Location.update({
'id': loc['id'],
'new-name': invalid_name,
})
@tier1
def test_negative_update_with_domain_by_id(self):
"""@Test: Try to update existing location with incorrect domain. Use
domain id as a parameter
@Feature: Location
@Assert: Location is not updated
"""
loc = make_location()
with self.assertRaises(CLIReturnCodeError):
Location.update({
'domain-ids': gen_string('numeric', 6),
'id': loc['id'],
})
@tier1
def test_negative_update_with_template_by_name(self):
"""@Test: Try to update existing location with incorrect config
template. Use template name as a parameter
@Feature: Location
@Assert: Location is not updated
"""
loc = make_location()
with self.assertRaises(CLIReturnCodeError):
Location.update({
'config-templates': gen_string('utf8', 80),
'id': loc['id'],
})
@tier1
def test_positive_delete_by_name(self):
"""@Test: Try to delete location using name of that location as a
parameter. Use different value types for testing.
@Feature: Location
@Assert: Location is deleted successfully
"""
for name in valid_loc_data_list():
with self.subTest(name):
loc = make_location({'name': name})
self.assertEqual(loc['name'], name)
Location.delete({'name': loc['name']})
with self.assertRaises(CLIReturnCodeError):
Location.info({'id': loc['id']})
@tier1
def test_positive_delete_by_id(self):
"""@Test: Try to delete location using id of that location as a
parameter
@Feature: Location
@Assert: Location is deleted successfully
"""
loc = make_location()
Location.delete({'id': loc['id']})
with self.assertRaises(CLIReturnCodeError):
Location.info({'id': loc['id']})
| gpl-3.0 | -342,538,987,300,248,300 | 32.286408 | 79 | 0.604686 | false |
ZeroCater/linty | interface/utils.py | 1 | 1341 | from github import Github
from social.apps.django_app.default.models import UserSocialAuth
def get_github(user):
if user.is_authenticated():
try:
data = UserSocialAuth.objects.filter(user=user).values_list('extra_data')[0][0]
username = data['login']
password = data['access_token']
return Github(username, password)
except:
pass
return Github()
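# Illustrative usage (not part of the original module; the repo name below is
# only an example):
#   gh = get_github(request.user)
#   repo = gh.get_repo('owner/name')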
def get_page_number_list(page_num, total_num_pages):
"""
Returns a list of up to 9 page numbers centered around
page_num as best it can without going below page 1 or
above the total number of pages.
>>> get_page_number_list(3, 4)
[1, 2, 3, 4]
>>> get_page_number_list(12, 20)
[8, 9, 10, 11, 12, 13, 14, 15, 16]
>>> get_page_number_list(18, 20)
[12, 13, 14, 15, 16, 17, 18, 19, 20]
"""
if total_num_pages < 10:
return range(1, total_num_pages + 1)
    elif page_num <= 5:  # Page to center is 5 or less, print the 9 lowest numbers
return range(1, 10)
elif total_num_pages - page_num > 4: # Enough space on both side of number to center
return range(page_num - 4, page_num + 5)
else: # Page to center around is at or near the end of the range, print last 9 numbers
return range(total_num_pages - 8, total_num_pages + 1)
| mit | 9,190,143,782,192,360,000 | 31.707317 | 91 | 0.611484 | false |
luqasz/mcm | mcm/datastructures.py | 1 | 1260 | # -*- coding: UTF-8 -*-
from posixpath import join as pjoin
from collections import namedtuple
from mcm.cmdpathtypes import MENU_PATHS
CmdPath = namedtuple('CmdPath', ('absolute', 'type', 'keys', 'modord', 'strategy'))
def make_cmdpath(path, strategy):
attrs = dict()
attrs['absolute'] = pjoin('/', path ).rstrip('/')
attrs['strategy'] = strategy
path_attrs = MENU_PATHS[path]
attrs['keys'] = path_attrs['keys']
attrs['type'] = path_attrs['type']
attrs['modord'] = path_attrs['modord']
return CmdPath(**attrs)
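# Illustrative usage (hypothetical values; valid paths and strategies come from
# MENU_PATHS and the caller's configuration):
#   cmdpath = make_cmdpath('ip/address', strategy='exact')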
class CmdPathRow(dict):
def __str__(self):
bool_map = {True:'yes', False:'no', None:'""', '':'""'}
return ' '.join('{}={}'.format(key, bool_map.get(value, value)) for key, value in self.items())
def __hash__(self):
return hash(tuple(self.items()))
def __sub__(self, other):
        '''Return a new instance with the key, value pairs from self not listed in other.'''
diff = dict(set(self.items()) - set(other.items()))
return CmdPathRow( diff )
def isunique(self, other, keys):
'''Test whether every key,value pair (from keys) is in other.'''
pairs = set( (key,self[key]) for key in keys )
return pairs <= set(other.items())
| gpl-2.0 | -8,474,907,335,657,072,000 | 25.25 | 103 | 0.596825 | false |
izapolsk/integration_tests | cfme/tests/infrastructure/test_ssa_vddk.py | 1 | 4245 | import fauxfactory
import pytest
from wrapanapi import VmState
from cfme import test_requirements
from cfme.infrastructure import host as host_ui
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.markers.env_markers.provider import ONE_PER_VERSION
from cfme.utils import conf
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.update import update
pytestmark = [
pytest.mark.tier(3),
test_requirements.smartstate,
pytest.mark.meta(server_roles="+smartproxy +smartstate"),
pytest.mark.provider([VMwareProvider], selector=ONE_PER_VERSION),
pytest.mark.usefixtures('setup_provider')
]
vddk_versions = [
('v6_0'),
('v6_5'),
('v6_7')
]
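# Each version key above parametrizes the configure_vddk fixture below, which
# looks the key up under basic_info/vddk_url in cfme_data.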
@pytest.fixture(scope="module")
def ssa_analysis_profile(appliance):
collected_files = []
for file in ["/etc/hosts", "/etc/passwd"]:
collected_files.append({"Name": file, "Collect Contents?": True})
analysis_profile_name = fauxfactory.gen_alphanumeric(18, start="ssa_analysis_")
analysis_profile_collection = appliance.collections.analysis_profiles
analysis_profile = analysis_profile_collection.create(
name=analysis_profile_name,
description=analysis_profile_name,
profile_type=analysis_profile_collection.VM_TYPE,
categories=["System"],
files=collected_files)
yield
if analysis_profile.exists:
analysis_profile.delete()
@pytest.fixture(params=vddk_versions, ids=([item for item in vddk_versions]), scope='function')
def configure_vddk(request, appliance, provider, vm):
vddk_version = request.param
vddk_url = conf.cfme_data.get("basic_info", {}).get("vddk_url", {}).get(vddk_version, None)
if vddk_url is None:
pytest.skip('Could not locate vddk url in cfme_data')
else:
appliance.install_vddk(vddk_url=vddk_url)
view = navigate_to(vm, 'Details')
host_name = view.entities.summary("Relationships").get_text_of("Host")
host, = [host for host in provider.hosts.all() if host.name == host_name]
host_data, = [data for data in provider.data['hosts'] if data['name'] == host.name]
# TODO: Remove Host UI validation BZ:1718209
# host.update_credentials_rest(credentials=host_data['credentials'])
host_collection = appliance.collections.hosts
host_obj = host_collection.instantiate(name=host.name, provider=provider)
with update(host_obj, validate_credentials=True):
host_obj.credentials = {'default': host_ui.Host.Credential.from_config(
host_data['credentials']['default'])}
@request.addfinalizer
def _finalize():
appliance.uninstall_vddk()
with update(host_obj):
host_obj.credentials = {'default': host_ui.Host.Credential(
principal="", secret="", verify_secret="")}
@pytest.fixture(scope="function")
def vm(request, provider, small_template, ssa_analysis_profile):
""" Fixture to provision instance on the provider """
vm_name = random_vm_name("ssa", max_length=16)
vm_obj = provider.appliance.collections.infra_vms.instantiate(vm_name,
provider,
small_template.name)
vm_obj.create_on_provider(find_in_cfme=True, allow_skip="default")
vm_obj.mgmt.ensure_state(VmState.RUNNING)
@request.addfinalizer
def _finalize():
try:
vm_obj.cleanup_on_provider()
provider.refresh_provider_relationships()
except Exception as e:
logger.exception(e)
return vm_obj
@pytest.mark.long_running
def test_ssa_vddk(vm, configure_vddk):
"""Check if different version of vddk works with provider
Polarion:
assignee: sbulage
casecomponent: SmartState
initialEstimate: 1/2h
"""
vm.smartstate_scan(wait_for_task_result=True)
view = navigate_to(vm, 'Details')
c_users = view.entities.summary('Security').get_text_of('Users')
c_groups = view.entities.summary('Security').get_text_of('Groups')
assert any([c_users != 0, c_groups != 0])
| gpl-2.0 | -31,905,897,597,105,476 | 36.901786 | 95 | 0.666196 | false |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/core/dtypes/concat.py | 1 | 19917 | """
Utility functions related to concat
"""
import numpy as np
import pandas._libs.tslib as tslib
from pandas import compat
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_sparse,
is_datetimetz,
is_datetime64_dtype,
is_timedelta64_dtype,
is_period_dtype,
is_object_dtype,
is_bool_dtype,
is_dtype_equal,
_NS_DTYPE,
_TD_DTYPE)
from pandas.core.dtypes.generic import (
ABCDatetimeIndex, ABCTimedeltaIndex,
ABCPeriodIndex, ABCRangeIndex)
def get_dtype_kinds(l):
"""
Parameters
----------
l : list of arrays
Returns
-------
a set of kinds that exist in this list of arrays
"""
typs = set()
for arr in l:
dtype = arr.dtype
if is_categorical_dtype(dtype):
typ = 'category'
elif is_sparse(arr):
typ = 'sparse'
elif isinstance(arr, ABCRangeIndex):
typ = 'range'
elif is_datetimetz(arr):
# if to_concat contains different tz,
# the result must be object dtype
typ = str(arr.dtype)
elif is_datetime64_dtype(dtype):
typ = 'datetime'
elif is_timedelta64_dtype(dtype):
typ = 'timedelta'
elif is_object_dtype(dtype):
typ = 'object'
elif is_bool_dtype(dtype):
typ = 'bool'
elif is_period_dtype(dtype):
typ = str(arr.dtype)
else:
typ = dtype.kind
typs.add(typ)
return typs
def _get_series_result_type(result, objs=None):
"""
return appropriate class of Series concat
input is either dict or array-like
"""
# concat Series with axis 1
if isinstance(result, dict):
# concat Series with axis 1
if all(is_sparse(c) for c in compat.itervalues(result)):
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame
else:
from pandas.core.frame import DataFrame
return DataFrame
# otherwise it is a SingleBlockManager (axis = 0)
if result._block.is_sparse:
from pandas.core.sparse.api import SparseSeries
return SparseSeries
else:
return objs[0]._constructor
def _get_frame_result_type(result, objs):
"""
return appropriate class of DataFrame-like concat
if any block is SparseBlock, return SparseDataFrame
otherwise, return 1st obj
"""
if any(b.is_sparse for b in result.blocks):
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame
else:
return objs[0]
def _concat_compat(to_concat, axis=0):
"""
provide concatenation of an array of arrays each of which is a single
    'normalized' dtype (in that, for example, if it's object, then it is a
non-datetimelike and provide a combined dtype for the resulting array that
preserves the overall dtype if possible)
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
Returns
-------
a single array, preserving the combined dtypes
"""
# filter empty arrays
# 1-d dtypes always are included here
def is_nonempty(x):
try:
return x.shape[axis] > 0
except Exception:
return True
nonempty = [x for x in to_concat if is_nonempty(x)]
# If all arrays are empty, there's nothing to convert, just short-cut to
# the concatenation, #3121.
#
# Creating an empty array directly is tempting, but the winnings would be
# marginal given that it would still require shape & dtype calculation and
# np.concatenate which has them both implemented is compiled.
typs = get_dtype_kinds(to_concat)
_contains_datetime = any(typ.startswith('datetime') for typ in typs)
_contains_period = any(typ.startswith('period') for typ in typs)
if 'category' in typs:
        # this must be prior to _concat_datetime,
# to support Categorical + datetime-like
return _concat_categorical(to_concat, axis=axis)
elif _contains_datetime or 'timedelta' in typs or _contains_period:
return _concat_datetime(to_concat, axis=axis, typs=typs)
# these are mandated to handle empties as well
elif 'sparse' in typs:
return _concat_sparse(to_concat, axis=axis, typs=typs)
if not nonempty:
# we have all empties, but may need to coerce the result dtype to
# object if we have non-numeric type operands (numpy would otherwise
# cast this to float)
typs = get_dtype_kinds(to_concat)
if len(typs) != 1:
if (not len(typs - set(['i', 'u', 'f'])) or
not len(typs - set(['bool', 'i', 'u']))):
# let numpy coerce
pass
else:
# coerce to object
to_concat = [x.astype('object') for x in to_concat]
return np.concatenate(to_concat, axis=axis)
def _concat_categorical(to_concat, axis=0):
"""Concatenate an object/categorical array of arrays, each of which is a
single dtype
Parameters
----------
to_concat : array of arrays
axis : int
Axis to provide concatenation in the current implementation this is
always 0, e.g. we only have 1D categoricals
Returns
-------
Categorical
A single array, preserving the combined dtypes
"""
def _concat_asobject(to_concat):
to_concat = [x.get_values() if is_categorical_dtype(x.dtype)
else x.ravel() for x in to_concat]
res = _concat_compat(to_concat)
if axis == 1:
return res.reshape(1, len(res))
else:
return res
# we could have object blocks and categoricals here
# if we only have a single categoricals then combine everything
# else its a non-compat categorical
categoricals = [x for x in to_concat if is_categorical_dtype(x.dtype)]
# validate the categories
if len(categoricals) != len(to_concat):
pass
else:
# when all categories are identical
first = to_concat[0]
if all(first.is_dtype_equal(other) for other in to_concat[1:]):
return union_categoricals(categoricals)
return _concat_asobject(to_concat)
def union_categoricals(to_union, sort_categories=False, ignore_order=False):
"""
Combine list-like of Categorical-like, unioning categories. All
categories must have the same dtype.
.. versionadded:: 0.19.0
Parameters
----------
to_union : list-like of Categorical, CategoricalIndex,
or Series with dtype='category'
sort_categories : boolean, default False
If true, resulting categories will be lexsorted, otherwise
they will be ordered as they appear in the data.
    ignore_order : boolean, default False
If true, the ordered attribute of the Categoricals will be ignored.
Results in an unordered categorical.
.. versionadded:: 0.20.0
Returns
-------
result : Categorical
Raises
------
TypeError
- all inputs do not have the same dtype
- all inputs do not have the same ordered property
- all inputs are ordered and their categories are not identical
- sort_categories=True and Categoricals are ordered
ValueError
Empty list of categoricals passed
Notes
-----
To learn more about categories, see `link
<http://pandas.pydata.org/pandas-docs/stable/categorical.html#unioning>`__
Examples
--------
>>> from pandas.api.types import union_categoricals
If you want to combine categoricals that do not necessarily have
the same categories, `union_categoricals` will combine a list-like
of categoricals. The new categories will be the union of the
categories being combined.
>>> a = pd.Categorical(["b", "c"])
>>> b = pd.Categorical(["a", "b"])
>>> union_categoricals([a, b])
[b, c, a, b]
Categories (3, object): [b, c, a]
By default, the resulting categories will be ordered as they appear
in the `categories` of the data. If you want the categories to be
lexsorted, use `sort_categories=True` argument.
>>> union_categoricals([a, b], sort_categories=True)
[b, c, a, b]
Categories (3, object): [a, b, c]
`union_categoricals` also works with the case of combining two
categoricals of the same categories and order information (e.g. what
you could also `append` for).
>>> a = pd.Categorical(["a", "b"], ordered=True)
>>> b = pd.Categorical(["a", "b", "a"], ordered=True)
>>> union_categoricals([a, b])
[a, b, a, b, a]
Categories (2, object): [a < b]
Raises `TypeError` because the categories are ordered and not identical.
>>> a = pd.Categorical(["a", "b"], ordered=True)
>>> b = pd.Categorical(["a", "b", "c"], ordered=True)
>>> union_categoricals([a, b])
TypeError: to union ordered Categoricals, all categories must be the same
New in version 0.20.0
Ordered categoricals with different categories or orderings can be
    combined by using the `ignore_order=True` argument.
>>> a = pd.Categorical(["a", "b", "c"], ordered=True)
>>> b = pd.Categorical(["c", "b", "a"], ordered=True)
>>> union_categoricals([a, b], ignore_order=True)
[a, b, c, c, b, a]
Categories (3, object): [a, b, c]
`union_categoricals` also works with a `CategoricalIndex`, or `Series`
containing categorical data, but note that the resulting array will
always be a plain `Categorical`
>>> a = pd.Series(["b", "c"], dtype='category')
>>> b = pd.Series(["a", "b"], dtype='category')
>>> union_categoricals([a, b])
[b, c, a, b]
Categories (3, object): [b, c, a]
"""
from pandas import Index, Categorical, CategoricalIndex, Series
from pandas.core.categorical import _recode_for_categories
if len(to_union) == 0:
raise ValueError('No Categoricals to union')
def _maybe_unwrap(x):
if isinstance(x, (CategoricalIndex, Series)):
return x.values
elif isinstance(x, Categorical):
return x
else:
raise TypeError("all components to combine must be Categorical")
to_union = [_maybe_unwrap(x) for x in to_union]
first = to_union[0]
if not all(is_dtype_equal(other.categories.dtype, first.categories.dtype)
for other in to_union[1:]):
raise TypeError("dtype of categories must be the same")
ordered = False
if all(first.is_dtype_equal(other) for other in to_union[1:]):
# identical categories - fastpath
categories = first.categories
ordered = first.ordered
new_codes = np.concatenate([c.codes for c in to_union])
if sort_categories and not ignore_order and ordered:
raise TypeError("Cannot use sort_categories=True with "
"ordered Categoricals")
if sort_categories and not categories.is_monotonic_increasing:
categories = categories.sort_values()
indexer = categories.get_indexer(first.categories)
from pandas.core.algorithms import take_1d
new_codes = take_1d(indexer, new_codes, fill_value=-1)
elif ignore_order or all(not c.ordered for c in to_union):
# different categories - union and recode
cats = first.categories.append([c.categories for c in to_union[1:]])
categories = Index(cats.unique())
if sort_categories:
categories = categories.sort_values()
new_codes = []
for c in to_union:
new_codes.append(_recode_for_categories(c.codes, c.categories,
categories))
new_codes = np.concatenate(new_codes)
else:
# ordered - to show a proper error message
if all(c.ordered for c in to_union):
msg = ("to union ordered Categoricals, "
"all categories must be the same")
raise TypeError(msg)
else:
raise TypeError('Categorical.ordered must be the same')
if ignore_order:
ordered = False
return Categorical(new_codes, categories=categories, ordered=ordered,
fastpath=True)
def _concat_datetime(to_concat, axis=0, typs=None):
"""
    provide concatenation of a datetimelike array of arrays each of which is a
    single M8[ns], datetime64[ns, tz] or m8[ns] dtype
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
typs : set of to_concat dtypes
Returns
-------
a single array, preserving the combined dtypes
"""
def convert_to_pydatetime(x, axis):
# coerce to an object dtype
# if dtype is of datetimetz or timezone
if x.dtype.kind == _NS_DTYPE.kind:
if getattr(x, 'tz', None) is not None:
x = x.asobject.values
else:
shape = x.shape
x = tslib.ints_to_pydatetime(x.view(np.int64).ravel(),
box=True)
x = x.reshape(shape)
elif x.dtype == _TD_DTYPE:
shape = x.shape
x = tslib.ints_to_pytimedelta(x.view(np.int64).ravel(), box=True)
x = x.reshape(shape)
if axis == 1:
x = np.atleast_2d(x)
return x
if typs is None:
typs = get_dtype_kinds(to_concat)
# must be single dtype
if len(typs) == 1:
_contains_datetime = any(typ.startswith('datetime') for typ in typs)
_contains_period = any(typ.startswith('period') for typ in typs)
if _contains_datetime:
if 'datetime' in typs:
new_values = np.concatenate([x.view(np.int64) for x in
to_concat], axis=axis)
return new_values.view(_NS_DTYPE)
else:
# when to_concat has different tz, len(typs) > 1.
# thus no need to care
return _concat_datetimetz(to_concat)
elif 'timedelta' in typs:
new_values = np.concatenate([x.view(np.int64) for x in to_concat],
axis=axis)
return new_values.view(_TD_DTYPE)
elif _contains_period:
# PeriodIndex must be handled by PeriodIndex,
# Thus can't meet this condition ATM
# Must be changed when we adding PeriodDtype
raise NotImplementedError
# need to coerce to object
to_concat = [convert_to_pydatetime(x, axis) for x in to_concat]
return np.concatenate(to_concat, axis=axis)
def _concat_datetimetz(to_concat, name=None):
"""
concat DatetimeIndex with the same tz
all inputs must be DatetimeIndex
it is used in DatetimeIndex.append also
"""
# do not pass tz to set because tzlocal cannot be hashed
if len(set([str(x.dtype) for x in to_concat])) != 1:
raise ValueError('to_concat must have the same tz')
tz = to_concat[0].tz
# no need to localize because internal repr will not be changed
new_values = np.concatenate([x.asi8 for x in to_concat])
return to_concat[0]._simple_new(new_values, tz=tz, name=name)
def _concat_index_asobject(to_concat, name=None):
"""
concat all inputs as object. DatetimeIndex, TimedeltaIndex and
PeriodIndex are converted to object dtype before concatenation
"""
klasses = ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex
to_concat = [x.asobject if isinstance(x, klasses) else x
for x in to_concat]
from pandas import Index
self = to_concat[0]
attribs = self._get_attributes_dict()
attribs['name'] = name
to_concat = [x._values if isinstance(x, Index) else x
for x in to_concat]
return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs)
def _concat_sparse(to_concat, axis=0, typs=None):
"""
    provide concatenation of a sparse/dense array of arrays each of which is a
single dtype
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
typs : set of to_concat dtypes
Returns
-------
a single array, preserving the combined dtypes
"""
from pandas.core.sparse.array import SparseArray, _make_index
def convert_sparse(x, axis):
# coerce to native type
if isinstance(x, SparseArray):
x = x.get_values()
x = x.ravel()
if axis > 0:
x = np.atleast_2d(x)
return x
if typs is None:
typs = get_dtype_kinds(to_concat)
if len(typs) == 1:
# concat input as it is if all inputs are sparse
# and have the same fill_value
fill_values = set(c.fill_value for c in to_concat)
if len(fill_values) == 1:
sp_values = [c.sp_values for c in to_concat]
indexes = [c.sp_index.to_int_index() for c in to_concat]
indices = []
loc = 0
for idx in indexes:
indices.append(idx.indices + loc)
loc += idx.length
sp_values = np.concatenate(sp_values)
indices = np.concatenate(indices)
sp_index = _make_index(loc, indices, kind=to_concat[0].sp_index)
return SparseArray(sp_values, sparse_index=sp_index,
fill_value=to_concat[0].fill_value)
# input may be sparse / dense mixed and may have different fill_value
# input must contain sparse at least 1
sparses = [c for c in to_concat if is_sparse(c)]
fill_values = [c.fill_value for c in sparses]
sp_indexes = [c.sp_index for c in sparses]
# densify and regular concat
to_concat = [convert_sparse(x, axis) for x in to_concat]
result = np.concatenate(to_concat, axis=axis)
if not len(typs - set(['sparse', 'f', 'i'])):
# sparsify if inputs are sparse and dense numerics
# first sparse input's fill_value and SparseIndex is used
result = SparseArray(result.ravel(), fill_value=fill_values[0],
kind=sp_indexes[0])
else:
# coerce to object if needed
result = result.astype('object')
return result
def _concat_rangeindex_same_dtype(indexes):
"""
Concatenates multiple RangeIndex instances. All members of "indexes" must
be of type RangeIndex; result will be RangeIndex if possible, Int64Index
otherwise. E.g.:
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
"""
start = step = next = None
for obj in indexes:
if not len(obj):
continue
if start is None:
# This is set by the first non-empty index
start = obj._start
if step is None and len(obj) > 1:
step = obj._step
elif step is None:
# First non-empty index had only one element
if obj._start == start:
return _concat_index_asobject(indexes)
step = obj._start - start
non_consecutive = ((step != obj._step and len(obj) > 1) or
(next is not None and obj._start != next))
if non_consecutive:
# Int64Index._append_same_dtype([ix.astype(int) for ix in indexes])
# would be preferred... but it currently resorts to
# _concat_index_asobject anyway.
return _concat_index_asobject(indexes)
if step is not None:
next = obj[-1] + step
if start is None:
start = obj._start
step = obj._step
stop = obj._stop if next is None else next
return indexes[0].__class__(start, stop, step)
| apache-2.0 | 8,689,723,675,431,700,000 | 32.084718 | 79 | 0.603956 | false |
SylvainCecchetto/plugin.video.catchuptvandmore | plugin.video.catchuptvandmore/resources/lib/channels/tn/nessma.py | 1 | 6252 | # -*- coding: utf-8 -*-
"""
Catch-up TV & More
Copyright (C) 2018 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals as unicode like in Python 3
from __future__ import unicode_literals
from builtins import str
from codequick import Route, Resolver, Listitem, utils, Script
from resources.lib import web_utils
from resources.lib import resolver_proxy
from resources.lib.menu_utils import item_post_treatment
import re
import urlquick
# TO DO
# Add Replays/Serie TV (required account)
# Live
URL_ROOT = 'https://www.nessma.tv'
URL_LIVE = URL_ROOT + '/ar/live'
URL_REPLAY = URL_ROOT + '/ar/replays'
URL_VIDEOS = URL_ROOT + '/ar/videos'
@Route.register
def list_categories(plugin, item_id, **kwargs):
"""
Build categories listing
"""
item = Listitem()
item.label = 'الفيديوهات'
item.set_callback(list_videos, item_id=item_id, page='1')
item_post_treatment(item)
yield item
item = Listitem()
item.label = 'مشاهدة الحلقات'
item.set_callback(list_programs, item_id=item_id)
item_post_treatment(item)
yield item
@Route.register
def list_programs(plugin, item_id, **kwargs):
"""
    Build programs listing
- Le JT
- ...
"""
resp = urlquick.get(URL_REPLAY)
root = resp.parse()
for program_datas in root.iterfind(".//div[@class='col-sm-3']"):
if program_datas.find('.//img').get('alt') is not None:
program_title = program_datas.find('.//img').get('alt')
program_image = program_datas.find('.//img').get('src')
program_url = program_datas.find('.//a').get('href')
item = Listitem()
item.label = program_title
item.art['thumb'] = item.art['landscape'] = program_image
item.set_callback(list_videos_replays,
item_id=item_id,
program_url=program_url,
page='1')
item_post_treatment(item)
yield item
@Route.register
def list_videos_replays(plugin, item_id, program_url, page, **kwargs):
resp = urlquick.get(program_url + '?page=%s' % (page))
root = resp.parse()
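    # Replay pages use one of two layouts: a "replaynessma-cats" grid of
    # <article> tags, or plain col-sm-3 columns; both branches below handle them.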
if root.find(".//div[@class='row replaynessma-cats row-eq-height ']") is not None:
root2 = resp.parse("div", attrs={"class": "row replaynessma-cats row-eq-height "})
for video_datas in root2.iterfind(".//article"):
video_title = video_datas.find('.//h3/a').text
video_image = video_datas.find('.//img').get('src')
video_url = video_datas.find('.//a').get('href')
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.set_callback(get_video_url,
item_id=item_id,
video_url=video_url)
item_post_treatment(item, is_playable=True, is_downloadable=True)
yield item
else:
for video_datas in root.iterfind(".//div[@class='col-sm-3']"):
video_title = video_datas.find('.//img').get('alt')
video_image = video_datas.find('.//img').get('src')
video_url = video_datas.find('.//a').get('href')
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.set_callback(get_video_url,
item_id=item_id,
video_url=video_url)
item_post_treatment(item, is_playable=True, is_downloadable=True)
yield item
yield Listitem.next_page(item_id=item_id,
program_url=program_url,
page=str(int(page) + 1))
@Route.register
def list_videos(plugin, item_id, page, **kwargs):
resp = urlquick.get(URL_VIDEOS + '?page=%s' % (page))
root = resp.parse()
for video_datas in root.iterfind(".//div[@class='col-sm-4']"):
if video_datas.find('.//img') is not None:
video_title = video_datas.find('.//img').get('alt')
video_image = video_datas.find('.//img').get('src')
video_url = video_datas.find('.//a').get('href')
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.set_callback(get_video_url,
item_id=item_id,
video_url=video_url)
item_post_treatment(item, is_playable=True, is_downloadable=True)
yield item
yield Listitem.next_page(item_id=item_id,
page=str(int(page) + 1))
@Resolver.register
def get_video_url(plugin,
item_id,
video_url,
download_mode=False,
**kwargs):
resp = urlquick.get(video_url)
video_id = re.compile(r'youtube\.com\/embed\/(.*.)\?').findall(
resp.text)[0]
return resolver_proxy.get_stream_youtube(plugin, video_id, download_mode)
@Resolver.register
def get_live_url(plugin, item_id, **kwargs):
resp = urlquick.get(URL_LIVE)
live_id = re.compile(r'dailymotion.com/embed/video/(.*?)[\?\"]').findall(
resp.text)[0]
return resolver_proxy.get_stream_dailymotion(plugin, live_id, False)
| gpl-2.0 | 3,812,057,039,132,161,000 | 32.31016 | 90 | 0.585006 | false |
JSBCCA/pythoncode | exercises/anagram.py | 1 | 4271 | from random import shuffle, choice
import time
import sys
original = ['purple', 'blue', 'green', 'yellow', 'orange', 'red', 'pink',
'white', 'gray', 'black', 'brown', 'ant', 'horse', 'dog', 'cat',
'food', 'house', 'cheese', 'pizza', 'hamburger', 'shark', 'bird',
'bat', 'baseball', 'football', 'video', 'game', 'hero', 'infinity',
'car', 'television', 'smart', 'telephone', 'cow', 'tornado', 'art',
'fire', 'water', 'earthquake', 'flood', 'dolphin', 'crow', 'shoe',
'sock', 'pants', 'shirt', 'hand', 'foot', 'tooth', 'legend', 'fly',
'snow', 'queen', 'king', 'clown', 'terrorist', 'airplane', 'truck',
'cone', 'brush', 'finger', 'hair', 'rabbit', 'table', 'bottle',
'can', 'bag', 'sword', 'gun', 'chicken', 'school', 'lock', 'eagle',
'hum', 'rainbow', 'rain', 'bow', 'radio', 'toothbrush', 'liquid',
'gas', 'solid', 'plasma', 'play', 'player', 'prayer', 'earth',
'wind', 'air', 'oxygen', 'clean', 'tomato', 'potato', 'volcano',
'piano', 'pray', 'fort', 'rock', 'pop', 'bee', 'bug', 'penguin',
'fish', 'beast', 'whale', 'hammer', 'crack', 'dark', 'light',
'heavy', 'pun', 'pond', 'punish', 'zero', 'thousand', 'hundred',
'alpha', 'omega', 'cream', 'oblivion', 'dragon', 'chaos', 'point',
'money', 'shield', 'super', 'dairy', 'okay', 'tree', 'plant',
'leaf', 'nuclear', 'family', 'code', 'program', 'president', 'ice',
'agent', 'prince', 'princess', 'boat', 'submarine', 'sandwich',
'elephant', 'home', 'cookie', 'soda', 'doll', 'nice', 'count',
'indigo', 'violet', 'violent', 'bark', 'branch', 'olive', 'pasta',
'file', 'ocean', 'sea', 'pirate', 'ninja', 'dinosaur', 'bowl',
'plate', 'spoon', 'fork', 'knife', 'spork', 'spatula', 'spaghetti',
'board', 'abroad', 'girl', 'boy', 'man', 'woman', 'child', 'adult',
'parent', 'son', 'sun', 'daughter', 'organ', 'trumpet', 'guitar',
'violin', 'trombone', 'bone', 'skeleton', 'meme', 'internet',
'drum', 'strum', 'stomach', 'piccolo', 'flute', 'symbol', 'digit',
'ship', 'robot', 'mouse', 'house', 'alone', 'create', 'fight',
'flashlight', 'deodorant', 'star', 'sky', 'vision', 'vampire',
'past', 'door', 'present', 'future', 'time', 'space', 'coffin',
'ghost', 'zombie', 'heaven', 'chocolate', 'candy', 'sweet',
'rude', 'forgive', 'computer', 'apocalypse', 'jupiter', 'mercury',
'brutal', 'flower', 'genius', 'window', 'muscle', 'miniscule',
'humongous', 'homunculus', 'terrifying', 'reindeer', 'incredible',
'watermelon', 'apricot', 'pumpkin', 'royalty', 'country', 'ear']
wordlist = []
mode = input("Easy, Medium, or Hard? ").lower().strip()
if mode == "easy":
for i in original:
if len(i) < 6:
wordlist.append(i)
time.sleep(1)
print("\n" * 25)
elif mode == "medium":
for i in original:
if (len(i) >= 5) and (len(i) < 7):
wordlist.append(i)
time.sleep(1)
print("\n" * 25)
elif mode == "hard":
for i in original:
if len(i) > 7:
wordlist.append(i)
time.sleep(1)
print("\n" * 25)
elif mode == 'q':
sys.exit()
else:
for i in original:
if len(i) < 6:
wordlist.append(i)
time.sleep(1)
print("\n" * 25)
while True:
x = False
if len(wordlist) == 0:
print("You win! Congratulations!")
sys.exit()
word_true = choice(wordlist)
word_ana = list(word_true)
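    # Keep shuffling until the scrambled letters differ from the original word.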
while x is False:
shuffle(word_ana)
if word_ana != list(word_true):
x = True
word_ana = ''.join(word_ana)
guess = False
print("Word anagram: " + str(word_ana))
while guess != word_true:
guess = input("Guess the word! ").strip().lower()
if guess == 'q':
sys.exit()
elif guess == word_true:
print("Correct!")
wordlist.remove(word_true)
time.sleep(2)
print("\n" * 25)
elif set(guess) != set(word_true):
print("Come on, you're not even trying...")
| mit | -5,148,330,549,776,378,000 | 43.957895 | 79 | 0.498712 | false |
fayimora/osprey | crawlers/one337x.py | 1 | 2198 | import requests
from .common import fetch_html
from bs4 import BeautifulSoup
from torrent import Torrent
class One337x(object):
"""Crawler for https://1337x.to"""
def __init__(self):
super(One337x, self).__init__()
self.domain = 'https://1337x.to'
def fetch_torrents(self, search_term):
# NOTE: For now we are only dealing with page 1
url = '%s/search/%s/1/' % (self.domain, search_term)
torrents = self.extract_torrents(url)
return torrents
def extract_torrents(self, url):
def extract_table_rows(html_doc):
soup = BeautifulSoup(html_doc, 'html.parser')
table_field = soup.find('table')
table_body = table_field.findChild('tbody')
table_rows = table_body.findChildren('tr')
return table_rows
def extract_magnet_link(url):
html_doc = requests.get(url).text
soup = BeautifulSoup(html_doc, 'html.parser')
magnet_link = soup.find('ul', class_='download-links').findChild('a', class_='btn-magnet').attrs['href']
return magnet_link
def parse_td(row):
name_tag = row.find('td', class_='coll-1')
name_contents = name_tag.contents
name = name_contents[0].text + name_contents[1].text
page_url = row.find('td', class_='coll-1').findAll('a')[1].attrs['href']
id_ = page_url.split('/')[2]
page_url = self.domain+page_url
seeds = row.find('td', class_='coll-2 seeds').text
leeches = row.find('td', class_='coll-3 leeches').text
date = row.find('td', class_='coll-date').text
size = str(row.find('td', class_='coll-4').contents[0])
magnet_link = extract_magnet_link(page_url)
return Torrent(
id=id_,
name=name,
seeds=seeds,
leeches=leeches,
date=date,
size=size,
url=page_url,
magnet=magnet_link
)
html_doc = fetch_html(url)
table_rows = extract_table_rows(html_doc)
return map(parse_td, table_rows)
| mit | 1,446,172,408,346,031,000 | 36.896552 | 116 | 0.547316 | false |
xiaom-GitHub/openthread | tests/scripts/thread-cert/Cert_5_3_06_RouterIdMask.py | 1 | 6339 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
import mle
import config
import command
DUT_LEADER = 1
ROUTER1 = 2
ROUTER2 = 3
class Cert_5_3_6_RouterIdMask(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,4):
self.nodes[i] = node.Node(i)
self.nodes[DUT_LEADER].set_panid(0xface)
self.nodes[DUT_LEADER].set_mode('rsdn')
self.nodes[DUT_LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[DUT_LEADER].enable_whitelist()
self.nodes[ROUTER1].set_panid(0xface)
self.nodes[ROUTER1].set_mode('rsdn')
self.nodes[ROUTER1].add_whitelist(self.nodes[DUT_LEADER].get_addr64())
self.nodes[ROUTER1].add_whitelist(self.nodes[ROUTER2].get_addr64())
self.nodes[ROUTER1].enable_whitelist()
self.nodes[ROUTER1].set_router_selection_jitter(1)
self.nodes[ROUTER2].set_panid(0xface)
self.nodes[ROUTER2].set_mode('rsdn')
self._setUpRouter2()
self.sniffer = config.create_default_thread_sniffer()
self.sniffer.start()
def _setUpRouter2(self):
self.nodes[ROUTER2].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[ROUTER2].enable_whitelist()
self.nodes[ROUTER2].set_router_selection_jitter(1)
def tearDown(self):
self.sniffer.stop()
del self.sniffer
for node in list(self.nodes.values()):
node.stop()
del self.nodes
def test(self):
# 1
self.nodes[DUT_LEADER].start()
self.nodes[DUT_LEADER].set_state('leader')
self.assertEqual(self.nodes[DUT_LEADER].get_state(), 'leader')
self.nodes[ROUTER1].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[ROUTER2].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
router2_id = self.nodes[ROUTER2].get_router_id()
# Wait DUT_LEADER to establish routing to ROUTER2 via ROUTER1's MLE advertisement.
time.sleep(config.MAX_ADVERTISEMENT_INTERVAL)
# 2
self.nodes[ROUTER2].reset()
self._setUpRouter2()
# 3 & 4
# Flush the message queue to avoid possible impact on follow-up verification.
dut_messages = self.sniffer.get_messages_sent_by(DUT_LEADER)
# Verify the cost from DUT_LEADER to ROUTER2 goes to infinity in 12 mins.
routing_cost = 1
for i in range(0, 24):
time.sleep(30)
print("%ss" %((i + 1) * 30))
leader_messages = self.sniffer.get_messages_sent_by(DUT_LEADER)
msg = leader_messages.last_mle_message(mle.CommandType.ADVERTISEMENT, False)
if msg == None:
continue
self.assertTrue(command.check_id_set(msg, router2_id))
routing_cost = command.get_routing_cost(msg, router2_id)
if routing_cost == 0:
break
self.assertTrue(routing_cost == 0)
time.sleep(config.INFINITE_COST_TIMEOUT + config.MAX_ADVERTISEMENT_INTERVAL)
leader_messages = self.sniffer.get_messages_sent_by(DUT_LEADER)
msg = leader_messages.last_mle_message(mle.CommandType.ADVERTISEMENT)
self.assertFalse(command.check_id_set(msg, router2_id))
# 5
# Flush the message queue to avoid possible impact on follow-up verification.
dut_messages = self.sniffer.get_messages_sent_by(DUT_LEADER)
self.nodes[ROUTER2].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
time.sleep(config.MAX_ADVERTISEMENT_INTERVAL)
leader_messages = self.sniffer.get_messages_sent_by(DUT_LEADER)
leader_messages.last_mle_message(mle.CommandType.ADVERTISEMENT)
# 6
self.nodes[ROUTER1].stop()
self.nodes[ROUTER2].stop()
router1_id = self.nodes[ROUTER1].get_router_id()
router2_id = self.nodes[ROUTER2].get_router_id()
time.sleep(config.MAX_NEIGHBOR_AGE + config.MAX_ADVERTISEMENT_INTERVAL)
leader_messages = self.sniffer.get_messages_sent_by(DUT_LEADER)
msg = leader_messages.last_mle_message(mle.CommandType.ADVERTISEMENT)
self.assertEqual(command.get_routing_cost(msg, router1_id), 0)
time.sleep(config.INFINITE_COST_TIMEOUT + config.MAX_ADVERTISEMENT_INTERVAL)
leader_messages = self.sniffer.get_messages_sent_by(DUT_LEADER)
msg = leader_messages.last_mle_message(mle.CommandType.ADVERTISEMENT)
self.assertFalse(command.check_id_set(msg, router1_id))
self.assertFalse(command.check_id_set(msg, router2_id))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 1,042,447,781,126,635,500 | 38.61875 | 90 | 0.673135 | false |
zsjohny/jumpserver | apps/settings/forms/security.py | 1 | 3532 | # coding: utf-8
#
from django import forms
from django.utils.translation import ugettext_lazy as _
from .base import BaseForm
__all__ = ['SecuritySettingForm']
class SecuritySettingForm(BaseForm):
# MFA global setting
SECURITY_MFA_AUTH = forms.BooleanField(
required=False, label=_("MFA"),
help_text=_(
'After opening, all user login must use MFA'
'(valid for all users, including administrators)'
)
)
# Execute commands for user
SECURITY_COMMAND_EXECUTION = forms.BooleanField(
required=False, label=_("Batch execute commands"),
help_text=_("Allow user batch execute commands")
)
SECURITY_SERVICE_ACCOUNT_REGISTRATION = forms.BooleanField(
required=False, label=_("Service account registration"),
help_text=_("Allow using bootstrap token register service account, "
"when terminal setup, can disable it")
)
# limit login count
SECURITY_LOGIN_LIMIT_COUNT = forms.IntegerField(
min_value=3, max_value=99999,
label=_("Limit the number of login failures")
)
# limit login time
SECURITY_LOGIN_LIMIT_TIME = forms.IntegerField(
min_value=5, max_value=99999, label=_("No logon interval"),
help_text=_(
"Tip: (unit/minute) if the user has failed to log in for a limited "
"number of times, no login is allowed during this time interval."
)
)
# ssh max idle time
SECURITY_MAX_IDLE_TIME = forms.IntegerField(
min_value=1, max_value=99999, required=False,
label=_("Connection max idle time"),
help_text=_(
'If idle time more than it, disconnect connection '
'Unit: minute'
),
)
# password expiration time
SECURITY_PASSWORD_EXPIRATION_TIME = forms.IntegerField(
min_value=1, max_value=99999, label=_("Password expiration time"),
help_text=_(
"Tip: (unit: day) "
"If the user does not update the password during the time, "
"the user password will expire failure;"
"The password expiration reminder mail will be automatic sent to the user "
"by system within 5 days (daily) before the password expires"
)
)
# min length
SECURITY_PASSWORD_MIN_LENGTH = forms.IntegerField(
min_value=6, max_value=30, label=_("Password minimum length"),
)
# upper case
SECURITY_PASSWORD_UPPER_CASE = forms.BooleanField(
required=False, label=_("Must contain capital letters"),
help_text=_(
'After opening, the user password changes '
'and resets must contain uppercase letters')
)
# lower case
SECURITY_PASSWORD_LOWER_CASE = forms.BooleanField(
required=False, label=_("Must contain lowercase letters"),
help_text=_('After opening, the user password changes '
'and resets must contain lowercase letters')
)
# number
SECURITY_PASSWORD_NUMBER = forms.BooleanField(
required=False, label=_("Must contain numeric characters"),
help_text=_('After opening, the user password changes '
'and resets must contain numeric characters')
)
# special char
SECURITY_PASSWORD_SPECIAL_CHAR = forms.BooleanField(
required=False, label=_("Must contain special characters"),
help_text=_('After opening, the user password changes '
'and resets must contain special characters')
)
| gpl-2.0 | 4,208,873,372,663,636,500 | 36.978495 | 87 | 0.62769 | false |
sbhowmik89/oppia | core/domain/summary_services.py | 1 | 3602 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to operate on exploration summaries."""
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import stats_jobs_continuous
from core.domain import user_services
import utils
def get_human_readable_contributors_summary(contributors_summary):
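    # Returns a dict keyed by human readable username, e.g. (illustrative):
    #   {'alice': {'num_commits': 3, 'profile_picture_data_url': '...'}}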
contributor_ids = contributors_summary.keys()
contributor_usernames = user_services.get_human_readable_user_ids(
contributor_ids)
contributor_profile_pictures = (
user_services.get_profile_pictures_by_user_ids(contributor_ids))
return {
contributor_usernames[ind]: {
'num_commits': contributors_summary[contributor_ids[ind]],
'profile_picture_data_url': contributor_profile_pictures[
contributor_ids[ind]]
}
for ind in xrange(len(contributor_ids))
}
def get_displayable_exp_summary_dicts_matching_ids(exploration_ids):
"""Given a list of exploration ids, filters the list for
explorations that are currently non-private and not deleted,
and returns a list of dicts of the corresponding exploration summaries.
Please use this function when needing summary information to display on
exploration summary tiles in the frontend.
"""
displayable_exp_summaries = []
exploration_summaries = (
exp_services.get_exploration_summaries_matching_ids(exploration_ids))
view_counts = (
stats_jobs_continuous.StatisticsAggregator.get_views_multi(
exploration_ids))
for ind, exploration_summary in enumerate(exploration_summaries):
if exploration_summary and exploration_summary.status != (
rights_manager.ACTIVITY_STATUS_PRIVATE):
displayable_exp_summaries.append({
'id': exploration_summary.id,
'title': exploration_summary.title,
'category': exploration_summary.category,
'objective': exploration_summary.objective,
'language_code': exploration_summary.language_code,
'last_updated_msec': utils.get_time_in_millisecs(
exploration_summary.exploration_model_last_updated
),
'status': exploration_summary.status,
'ratings': exploration_summary.ratings,
'community_owned': exploration_summary.community_owned,
'human_readable_contributors_summary':
get_human_readable_contributors_summary(
exploration_summary.contributors_summary),
'tags': exploration_summary.tags,
'thumbnail_icon_url': utils.get_thumbnail_icon_url_for_category(
exploration_summary.category),
'thumbnail_bg_color': utils.get_hex_color_for_category(
exploration_summary.category),
'num_views': view_counts[ind],
})
return displayable_exp_summaries
| apache-2.0 | -3,810,459,752,714,880,000 | 42.926829 | 80 | 0.66935 | false |
MangoMangoDevelopment/neptune | lib/ros_comm-1.12.0/tools/rosmaster/src/rosmaster/util.py | 1 | 2367 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
"""
Utility routines for rosmaster.
"""
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from xmlrpc.client import ServerProxy
except ImportError:
from xmlrpclib import ServerProxy
_proxies = {} #cache ServerProxys
def xmlrpcapi(uri):
"""
@return: instance for calling remote server or None if not a valid URI
@rtype: xmlrpc.client.ServerProxy
"""
if uri is None:
return None
uriValidate = urlparse(uri)
if not uriValidate[0] or not uriValidate[1]:
return None
if not uri in _proxies:
_proxies[uri] = ServerProxy(uri)
return _proxies[uri]
def remove_server_proxy(uri):
if uri in _proxies:
del _proxies[uri]
| bsd-3-clause | 5,963,045,820,132,176,000 | 34.863636 | 74 | 0.741445 | false |
raonyguimaraes/mendelmd | variants/migrations/0001_initial.py | 1 | 14536 | # Generated by Django 2.0.1 on 2018-02-12 19:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('individuals', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Variant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('index', models.TextField(db_index=True)),
('pos_index', models.TextField(db_index=True)),
('chr', models.TextField(db_index=True, verbose_name='Chr')),
('pos', models.IntegerField(db_index=True)),
('variant_id', models.TextField(db_index=True, verbose_name='ID')),
('ref', models.TextField(blank=True, db_index=True, null=True)),
('alt', models.TextField(blank=True, db_index=True, null=True)),
('qual', models.FloatField(db_index=True)),
('filter', models.TextField(db_index=True)),
('info', models.TextField(blank=True, null=True)),
('format', models.TextField(blank=True, db_index=True, null=True)),
('genotype_col', models.TextField(blank=True, db_index=True, null=True)),
('genotype', models.TextField(db_index=True)),
('read_depth', models.IntegerField()),
('gene', models.TextField(blank=True, db_index=True, null=True)),
('mutation_type', models.TextField(db_index=True, null=True)),
('vartype', models.TextField(db_index=True, null=True)),
('genomes1k_maf', models.FloatField(blank=True, db_index=True, null=True, verbose_name='1000 Genomes Frequency')),
('dbsnp_maf', models.FloatField(blank=True, db_index=True, null=True, verbose_name='dbSNP Frequency')),
('esp_maf', models.FloatField(blank=True, db_index=True, null=True, verbose_name='ESP6500 Frequency')),
('dbsnp_build', models.IntegerField(db_index=True, null=True)),
('sift', models.FloatField(blank=True, db_index=True, null=True)),
('sift_pred', models.TextField(blank=True, db_index=True, null=True)),
('polyphen2', models.FloatField(blank=True, db_index=True, null=True)),
('polyphen2_pred', models.TextField(blank=True, db_index=True, null=True)),
('condel', models.FloatField(blank=True, db_index=True, null=True)),
('condel_pred', models.TextField(blank=True, db_index=True, null=True)),
('dann', models.FloatField(blank=True, db_index=True, null=True)),
('cadd', models.FloatField(blank=True, db_index=True, null=True)),
('hi_index_str', models.TextField(blank=True, db_index=True, null=True)),
('hi_index', models.FloatField(blank=True, db_index=True, null=True)),
('hi_index_perc', models.FloatField(blank=True, db_index=True, null=True)),
('is_at_omim', models.BooleanField(db_index=True, default=False)),
('is_at_hgmd', models.BooleanField(db_index=True, default=False)),
('hgmd_entries', models.TextField(blank=True, db_index=True, null=True)),
('snpeff_effect', models.TextField(blank=True, db_index=True, null=True)),
('snpeff_impact', models.TextField(blank=True, db_index=True, null=True)),
('snpeff_gene_name', models.TextField(blank=True, db_index=True, null=True)),
('vep_allele', models.TextField(blank=True, db_index=True, null=True)),
('vep_gene', models.TextField(blank=True, db_index=True, null=True)),
('vep_feature', models.TextField(blank=True, db_index=True, null=True)),
('vep_feature_type', models.TextField(blank=True, db_index=True, null=True)),
('vep_consequence', models.TextField(blank=True, db_index=True, null=True)),
('vep_cdna_position', models.TextField(blank=True, db_index=True, null=True)),
('vep_cds_position', models.TextField(blank=True, db_index=True, null=True)),
('vep_protein_position', models.TextField(blank=True, db_index=True, null=True)),
('vep_amino_acids', models.TextField(blank=True, db_index=True, null=True)),
('vep_codons', models.TextField(blank=True, db_index=True, null=True)),
('vep_existing_variation', models.TextField(blank=True, db_index=True, null=True)),
('vep_distance', models.TextField(blank=True, db_index=True, null=True)),
('vep_strand', models.TextField(blank=True, db_index=True, null=True)),
('vep_symbol', models.TextField(blank=True, db_index=True, null=True)),
('vep_symbol_source', models.TextField(blank=True, db_index=True, null=True)),
('vep_sift', models.TextField(blank=True, db_index=True, null=True)),
('vep_polyphen', models.TextField(blank=True, db_index=True, null=True)),
('vep_condel', models.TextField(blank=True, db_index=True, null=True)),
('ensembl_clin_HGMD', models.BooleanField(db_index=True, default=False)),
('clinvar_CLNSRC', models.TextField(blank=True, db_index=True, null=True)),
('SIFT_score', models.TextField(blank=True, db_index=True, null=True)),
('SIFT_converted_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('Uniprot_acc_Polyphen2', models.TextField(blank=True, db_index=True, null=True)),
('Uniprot_id_Polyphen2', models.TextField(blank=True, db_index=True, null=True)),
('Uniprot_aapos_Polyphen2', models.TextField(blank=True, db_index=True, null=True)),
('Polyphen2_HDIV_score', models.TextField(blank=True, db_index=True, null=True)),
('Polyphen2_HDIV_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('Polyphen2_HDIV_pred', models.TextField(blank=True, db_index=True, null=True)),
('Polyphen2_HVAR_score', models.TextField(blank=True, db_index=True, null=True)),
('Polyphen2_HVAR_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('Polyphen2_HVAR_pred', models.TextField(blank=True, db_index=True, null=True)),
('LRT_score', models.TextField(blank=True, db_index=True, null=True)),
('LRT_converted_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('LRT_pred', models.TextField(blank=True, db_index=True, null=True)),
('LRT_Omega', models.TextField(blank=True, db_index=True, null=True)),
('MutationTaster_score', models.TextField(blank=True, db_index=True, null=True)),
('MutationTaster_converted_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('MutationTaster_pred', models.TextField(blank=True, db_index=True, null=True)),
('MutationTaster_model', models.TextField(blank=True, db_index=True, null=True)),
('MutationTaster_AAE', models.TextField(blank=True, db_index=True, null=True)),
('MutationAssessor_UniprotID', models.TextField(blank=True, db_index=True, null=True)),
('MutationAssessor_variant', models.TextField(blank=True, db_index=True, null=True)),
('MutationAssessor_score', models.TextField(blank=True, db_index=True, null=True)),
('MutationAssessor_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('MutationAssessor_pred', models.TextField(blank=True, db_index=True, null=True)),
('FATHMM_score', models.TextField(blank=True, db_index=True, null=True)),
('FATHMM_converted_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('FATHMM_pred', models.TextField(blank=True, db_index=True, null=True)),
('PROVEAN_score', models.TextField(blank=True, db_index=True, null=True)),
('PROVEAN_converted_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('PROVEAN_pred', models.TextField(blank=True, db_index=True, null=True)),
('Transcript_id_VEST3', models.TextField(blank=True, db_index=True, null=True)),
('Transcript_var_VEST3', models.TextField(blank=True, db_index=True, null=True)),
('VEST3_score', models.TextField(blank=True, db_index=True, null=True)),
('VEST3_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('MetaSVM_score', models.TextField(blank=True, db_index=True, null=True)),
('MetaSVM_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('MetaSVM_pred', models.TextField(blank=True, db_index=True, null=True)),
('MetaLR_score', models.TextField(blank=True, db_index=True, null=True)),
('MetaLR_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('MetaLR_pred', models.TextField(blank=True, db_index=True, null=True)),
('Reliability_index', models.TextField(blank=True, db_index=True, null=True)),
('CADD_raw', models.TextField(blank=True, db_index=True, null=True)),
('CADD_raw_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('CADD_phred', models.TextField(blank=True, db_index=True, null=True)),
('DANN_score', models.TextField(blank=True, db_index=True, null=True)),
('DANN_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('fathmm_MKL_coding_score', models.TextField(blank=True, db_index=True, null=True)),
('fathmm_MKL_coding_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('fathmm_MKL_coding_pred', models.TextField(blank=True, db_index=True, null=True)),
('fathmm_MKL_coding_group', models.TextField(blank=True, db_index=True, null=True)),
('Eigen_raw', models.TextField(blank=True, db_index=True, null=True)),
('Eigen_phred', models.TextField(blank=True, db_index=True, null=True)),
('Eigen_raw_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('Eigen_PC_raw', models.TextField(blank=True, db_index=True, null=True)),
('Eigen_PC_raw_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('GenoCanyon_score', models.TextField(blank=True, db_index=True, null=True)),
('GenoCanyon_score_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('integrated_fitCons_score', models.TextField(blank=True, db_index=True, null=True)),
('integrated_fitCons_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('integrated_confidence_value', models.TextField(blank=True, db_index=True, null=True)),
('GM12878_fitCons_score', models.TextField(blank=True, db_index=True, null=True)),
('GM12878_fitCons_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('GM12878_confidence_value', models.TextField(blank=True, db_index=True, null=True)),
('H1_hESC_fitCons_score', models.TextField(blank=True, db_index=True, null=True)),
('H1_hESC_fitCons_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('H1_hESC_confidence_value', models.TextField(blank=True, db_index=True, null=True)),
('HUVEC_fitCons_score', models.TextField(blank=True, db_index=True, null=True)),
('HUVEC_fitCons_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('HUVEC_confidence_value', models.TextField(blank=True, db_index=True, null=True)),
('GERP_NR', models.TextField(blank=True, db_index=True, null=True)),
('GERP_RS', models.TextField(blank=True, db_index=True, null=True)),
('GERP_RS_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('phyloP100way_vertebrate', models.TextField(blank=True, db_index=True, null=True)),
('phyloP100way_vertebrate_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('phyloP20way_mammalian', models.TextField(blank=True, db_index=True, null=True)),
('phyloP20way_mammalian_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('phastCons100way_vertebrate', models.TextField(blank=True, db_index=True, null=True)),
('phastCons100way_vertebrate_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('phastCons20way_mammalian', models.TextField(blank=True, db_index=True, null=True)),
('phastCons20way_mammalian_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('SiPhy_29way_pi', models.TextField(blank=True, db_index=True, null=True)),
('SiPhy_29way_logOdds', models.TextField(blank=True, db_index=True, null=True)),
('SiPhy_29way_logOdds_rankscore', models.TextField(blank=True, db_index=True, null=True)),
('clinvar_rs', models.TextField(blank=True, db_index=True, null=True)),
('clinvar_clnsig', models.TextField(blank=True, db_index=True, null=True)),
('clinvar_trait', models.TextField(blank=True, db_index=True, null=True)),
('clinvar_golden_stars', models.TextField(blank=True, db_index=True, null=True)),
('mcap_score', models.FloatField(blank=True, db_index=True, null=True)),
('mcap_rankscore', models.FloatField(blank=True, db_index=True, null=True)),
('mcap_pred', models.TextField(blank=True, db_index=True, null=True)),
('revel_score', models.TextField(blank=True, db_index=True, null=True)),
('individual', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='individuals.Individual')),
],
),
]
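# Usage note (an addition for this document, not part of the generated migration file):
# this initial schema is applied with the standard Django workflow, e.g.
#   python manage.py migrate variants
# Django will first apply the `individuals` app's 0001_initial migration, since it is
# declared in `dependencies` above.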
| bsd-3-clause | 7,776,012,488,357,304,000 | 83.023121 | 130 | 0.611172 | false |