repo_name (string, lengths 5-92) | path (string, lengths 4-232) | copies (string, 19 classes) | size (string, lengths 4-7) | content (string, lengths 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
conan-io/conan | conans/test/functional/scm/tools/test_git.py | 1 | 15486 | # coding=utf-8
import os
import re
import subprocess
import unittest
import pytest
import six
from mock import patch
from parameterized import parameterized
from conans.client import tools
from conans.client.tools.scm import Git
from conans.errors import ConanException
from conans.test.utils.scm import create_local_git_repo
from conans.test.utils.tools import temp_folder, TestClient
from conans.util.files import save
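# Note on a helper used throughout these tests: create_local_git_repo(files, ...)
# (imported above from conans.test.utils.scm) returns a (repo_path, commit_sha)
# tuple for a freshly initialised local repository, which is why the tests below
# unpack it as "path, revision = ..." or "root_path, _ = ...".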
@pytest.mark.tool_git
class GitRemoteUrlTest(unittest.TestCase):
def test_remove_credentials(self):
""" Check that the 'remove_credentials' argument is taken into account """
expected_url = 'https://myrepo.com/path/to/repo.git'
origin_url = 'https://username:[email protected]/path/to/repo.git'
git = Git(folder=temp_folder())
git.run("init .")
git.run("remote add origin {}".format(origin_url))
self.assertEqual(git.get_remote_url(), origin_url)
self.assertEqual(git.get_remote_url(remove_credentials=True), expected_url)
@pytest.mark.tool_git
class GitToolTest(unittest.TestCase):
@patch('subprocess.Popen')
def test_version(self, mocked_open):
mocked_open.return_value.communicate.return_value = ('git version 2.21.0'.encode(), None)
version = Git.get_version()
self.assertEqual(version, "2.21.0")
@patch('subprocess.Popen')
def test_version_invalid(self, mocked_open):
mocked_open.return_value.communicate.return_value = ('failed'.encode(), None)
with self.assertRaises(ConanException):
Git.get_version()
def test_repo_root(self):
root_path, _ = create_local_git_repo({"myfile": "anything"})
# Initialized in the root folder
git = Git(root_path)
self.assertEqual(root_path, git.get_repo_root())
# Initialized elsewhere
subfolder = os.path.join(root_path, 'subfolder')
os.makedirs(subfolder)
git = Git(subfolder)
self.assertEqual(root_path, git.get_repo_root())
def test_is_pristine(self):
root_path, _ = create_local_git_repo({"myfile": "anything"})
git = Git(root_path)
self.assertTrue(git.is_pristine())
save(os.path.join(root_path, "other_file"), "content")
self.assertFalse(git.is_pristine())
git.run("add .")
self.assertFalse(git.is_pristine())
git.run('commit -m "commit"')
self.assertTrue(git.is_pristine())
def test_is_local_repository(self):
root_path, _ = create_local_git_repo({"myfile": "anything"})
git = Git(temp_folder())
git.clone(root_path)
self.assertTrue(git.is_local_repository())
# TODO: Check that with remote one it is working too
def test_clone_git(self):
path, _ = create_local_git_repo({"myfile": "contents"})
tmp = temp_folder()
git = Git(tmp)
git.clone(path)
self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
@parameterized.expand([(None,), # default
("develop",), # branch name
("1.0",), # tag name
("HEAD",), # expression
])
def test_clone_git_shallow(self, element):
path, revision = create_local_git_repo({"myfile": "contents"}, commits=3, tags=["1.0"], branch="develop")
tmp = temp_folder()
git = Git(tmp)
git.clone("file://" + path, branch=element, shallow=True) # --depth is ignored in local clones
with self.assertRaises(subprocess.CalledProcessError):
git.checkout(element="HEAD~1")
self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
self.assertEqual(git.get_revision(), revision)
self.assertEqual(git.run("rev-list --all --count"), "1")
def test_clone_git_shallow_revision(self):
path, revision = create_local_git_repo({"myfile": "contents"}, commits=3, tags=["1.0"], branch="develop")
tmp = temp_folder()
git = Git(tmp)
if Git.get_version() < "2.13":
# older Git versions have known bugs with "git fetch origin <sha>":
# https://github.com/git/git/blob/master/Documentation/RelNotes/2.13.0.txt
# * "git fetch" that requests a commit by object name, when the other
# side does not allow such an request, failed without much
# explanation.
# https://github.com/git/git/blob/master/Documentation/RelNotes/2.14.0.txt
# * There is no good reason why "git fetch $there $sha1" should fail
# when the $sha1 names an object at the tip of an advertised ref,
# even when the other side hasn't enabled allowTipSHA1InWant.
with self.assertRaises(subprocess.CalledProcessError):
git.clone("file://" + path, branch=revision, shallow=True)
else:
git.clone("file://" + path, branch=revision, shallow=True)
with self.assertRaises(subprocess.CalledProcessError):
git.checkout(element="HEAD~1")
self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
self.assertEqual(git.get_revision(), revision)
self.assertEqual(git.run("rev-list --all --count"), "1")
def test_clone_git_shallow_with_local(self):
path, revision = create_local_git_repo({"repofile": "contents"}, commits=3)
tmp = temp_folder()
save(os.path.join(tmp, "localfile"), "contents")
save(os.path.join(tmp, "indexfile"), "contents")
git = Git(tmp)
git.run("init")
git.run("add indexfile")
git.clone("file://" + path, branch="master", shallow=True) # --depth is ignored in local clones
self.assertTrue(os.path.exists(os.path.join(tmp, "repofile")))
self.assertTrue(os.path.exists(os.path.join(tmp, "localfile")))
self.assertTrue(os.path.exists(os.path.join(tmp, "indexfile")))
self.assertEqual(git.get_revision(), revision)
self.assertEqual(git.run("rev-list --all --count"), "1")
def test_clone_existing_folder_git(self):
path, commit = create_local_git_repo({"myfile": "contents"}, branch="my_release")
tmp = temp_folder()
save(os.path.join(tmp, "file"), "dummy contents")
git = Git(tmp)
git.clone(path, branch="my_release")
self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
# Checkout a commit
git.checkout(commit)
self.assertEqual(git.get_revision(), commit)
def test_clone_existing_folder_without_branch(self):
tmp = temp_folder()
save(os.path.join(tmp, "file"), "dummy contents")
git = Git(tmp)
with six.assertRaisesRegex(self, ConanException, "specify a branch to checkout"):
git.clone("https://github.com/conan-io/hooks.git")
def test_credentials(self):
tmp = temp_folder()
git = Git(tmp, username="peter", password="otool")
url_credentials = git.get_url_with_credentials("https://some.url.com")
self.assertEqual(url_credentials, "https://peter:[email protected]")
def test_verify_ssl(self):
class MyRunner(object):
def __init__(self):
self.calls = []
def __call__(self, *args, **kwargs):
self.calls.append(args[0])
return ""
runner = MyRunner()
tmp = temp_folder()
git = Git(tmp, username="peter", password="otool", verify_ssl=True, runner=runner,
force_english=True)
git.clone(url="https://myrepo.git")
self.assertIn("git -c http.sslVerify=true", runner.calls[0])
runner = MyRunner()
git = Git(tmp, username="peter", password="otool", verify_ssl=False, runner=runner,
force_english=False)
git.clone(url="https://myrepo.git")
self.assertIn("git -c http.sslVerify=false", runner.calls[0])
def test_clone_submodule_git(self):
subsubmodule, _ = create_local_git_repo({"subsubmodule": "contents"})
submodule, _ = create_local_git_repo({"submodule": "contents"}, submodules=[subsubmodule])
path, commit = create_local_git_repo({"myfile": "contents"}, submodules=[submodule])
def _create_paths():
tmp = temp_folder()
submodule_path = os.path.join(
tmp,
os.path.basename(os.path.normpath(submodule)))
subsubmodule_path = os.path.join(
submodule_path,
os.path.basename(os.path.normpath(subsubmodule)))
return tmp, submodule_path, subsubmodule_path
# Check old (default) behaviour
tmp, submodule_path, _ = _create_paths()
git = Git(tmp)
git.clone(path)
self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
self.assertFalse(os.path.exists(os.path.join(submodule_path, "submodule")))
# Check invalid value
tmp, submodule_path, _ = _create_paths()
git = Git(tmp)
git.clone(path)
with six.assertRaisesRegex(self, ConanException,
"Invalid 'submodule' attribute value in the 'scm'."):
git.checkout(commit, submodule="invalid")
# Check shallow
tmp, submodule_path, subsubmodule_path = _create_paths()
git = Git(tmp)
git.clone(path)
git.checkout(commit, submodule="shallow")
self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
self.assertTrue(os.path.exists(os.path.join(submodule_path, "submodule")))
self.assertFalse(os.path.exists(os.path.join(subsubmodule_path, "subsubmodule")))
# Check recursive
tmp, submodule_path, subsubmodule_path = _create_paths()
git = Git(tmp)
git.clone(path)
git.checkout(commit, submodule="recursive")
self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
self.assertTrue(os.path.exists(os.path.join(submodule_path, "submodule")))
self.assertTrue(os.path.exists(os.path.join(subsubmodule_path, "subsubmodule")))
def test_git_to_capture_branch(self):
conanfile = """
import re
from conans import ConanFile, tools
def get_version():
git = tools.Git()
try:
branch = git.get_branch()
branch = re.sub('[^0-9a-zA-Z]+', '_', branch)
return "%s_%s" % (branch, git.get_revision())
except:
return None
class HelloConan(ConanFile):
name = "Hello"
version = get_version()
def build(self):
assert("r3le_ase__" in self.version)
assert(len(self.version) == 50)
"""
path, _ = create_local_git_repo({"conanfile.py": conanfile}, branch="r3le-ase-")
client = TestClient()
client.current_folder = path
client.run("create . user/channel")
def test_git_helper_in_recipe(self):
client = TestClient()
git_repo = temp_folder()
save(os.path.join(git_repo, "file.h"), "contents")
with client.chdir(git_repo):
client.run_command("git init .")
client.run_command('git config user.email "[email protected]"')
client.run_command('git config user.name "Your Name"')
client.run_command("git checkout -b dev")
client.run_command("git add .")
client.run_command('git commit -m "comm"')
conanfile = """
import os
from conans import ConanFile, tools
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports_sources = "other"
def source(self):
git = tools.Git()
git.clone("%s", "dev")
def build(self):
assert(os.path.exists("file.h"))
""" % git_repo.replace("\\", "/")
client.save({"conanfile.py": conanfile, "other": "hello"})
client.run("create . user/channel")
# Now clone in a subfolder with later checkout
conanfile = """
import os
from conans import ConanFile, tools
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports_sources = "other"
def source(self):
tools.mkdir("src")
git = tools.Git("./src")
git.clone("%s")
git.checkout("dev")
def build(self):
assert(os.path.exists(os.path.join("src", "file.h")))
""" % git_repo.replace("\\", "/")
client.save({"conanfile.py": conanfile, "other": "hello"})
client.run("create . user/channel")
# Base dir, with exports without subfolder and not specifying checkout fails
conanfile = """
import os
from conans import ConanFile, tools
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports_sources = "other"
def source(self):
git = tools.Git()
git.clone("%s")
def build(self):
assert(os.path.exists("file.h"))
""" % git_repo.replace("\\", "/")
client.save({"conanfile.py": conanfile, "other": "hello"})
client.run("create . user/channel", assert_error=True)
self.assertIn("specify a branch to checkout", client.out)
def test_git_commit_message(self):
client = TestClient()
git_repo = temp_folder()
with client.chdir(git_repo):
client.run_command("git init .")
client.run_command('git config user.email "[email protected]"')
client.run_command('git config user.name "Your Name"')
client.run_command("git checkout -b dev")
git = Git(git_repo)
self.assertIsNone(git.get_commit_message())
save(os.path.join(git_repo, "test"), "contents")
with client.chdir(git_repo):
client.run_command("git add test")
client.run_command('git commit -m "first commit"')
self.assertEqual("dev", git.get_branch())
self.assertEqual("first commit", git.get_commit_message())
@pytest.mark.tool_git
class GitToolsTests(unittest.TestCase):
def setUp(self):
self.folder, self.rev = create_local_git_repo({'myfile.txt': "contents"})
def test_no_tag(self):
"""
No tags has been created in repo
"""
git = Git(folder=self.folder)
tag = git.get_tag()
self.assertIsNone(tag)
def test_in_tag(self):
"""
Current checkout is on a tag
"""
git = Git(folder=self.folder)
git.run("tag 0.0.0")
tag = git.get_tag()
self.assertEqual("0.0.0", tag)
def test_in_branch_with_tag(self):
"""
Tag is defined but current commit is ahead of it
"""
git = Git(folder=self.folder)
git.run("tag 0.0.0")
save(os.path.join(self.folder, "file.txt"), "")
git.run("add .")
git.run("commit -m \"new file\"")
tag = git.get_tag()
self.assertIsNone(tag)
def test_get_tag_no_git_repo(self):
# Try to get tag out of a git repo
tmp_folder = temp_folder()
git = Git(folder=tmp_folder)
pattern = "'{0}' is not a valid 'git' repository or 'git' not found".format(
re.escape(tmp_folder))
with six.assertRaisesRegex(self, ConanException, pattern):
git.get_tag()
def test_excluded_files(self):
folder = temp_folder()
save(os.path.join(folder, "file"), "some contents")
git = Git(folder)
with tools.environment_append({"PATH": ""}):
excluded = git.excluded_files()
self.assertEqual(excluded, [])
| mit | 4,983,789,114,326,910,000 | 36.405797 | 113 | 0.597959 | false |
iceout/python_koans_practice | python2/koans/about_new_style_classes.py | 1 | 2171 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutNewStyleClasses(Koan):
class OldStyleClass:
"An old style class"
# The original class style has been phased out in Python 3.
class NewStyleClass(object):
"A new style class"
# Introduced in Python 2.2
#
# Aside from this set of tests, Python Koans sticks exclusively to this
# kind of class
pass
def test_new_style_classes_inherit_from_object_base_class(self):
self.assertEqual(____, issubclass(self.NewStyleClass, object))
self.assertEqual(____, issubclass(self.OldStyleClass, object))
def test_new_style_classes_have_more_attributes(self):
self.assertEqual(__, len(dir(self.OldStyleClass)))
self.assertEqual(__, self.OldStyleClass.__doc__)
self.assertEqual(__, self.OldStyleClass.__module__)
self.assertEqual(__, len(dir(self.NewStyleClass)))
# To examine the available attributes, run
# 'dir(<Class name goes here>)'
# from a python console
# ------------------------------------------------------------------
def test_old_style_classes_have_type_but_no_class_attribute(self):
self.assertEqual(__, self.OldStyleClass.__class__)
try:
cls = self.OldStyleClass.__class__
except Exception as ex:
pass
self.assertMatch(__, ex[0])
def test_new_style_classes_have_same_class_as_type(self):
new_style = self.NewStyleClass()
self.assertEqual(__, self.NewStyleClass.__class__)
self.assertEqual(
__,
type(self.NewStyleClass) == self.NewStyleClass.__class__)
# ------------------------------------------------------------------
def test_in_old_style_instances_class_is_different_to_type(self):
old_style = self.OldStyleClass()
self.assertEqual(__, old_style.__class__)
def test_new_style_instances_have_same_class_as_type(self):
new_style = self.NewStyleClass()
self.assertEqual(__, new_style.__class__)
self.assertEqual(__, type(new_style) == new_style.__class__)
| mit | 4,223,123,889,923,677,000 | 34.016129 | 79 | 0.58176 | false |
praveenkumar/ansible | lib/ansible/plugins/strategies/linear.py | 1 | 14325 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import iteritems
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook.block import Block
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task import Task
from ansible.plugins import action_loader
from ansible.plugins.strategies import StrategyBase
from ansible.template import Templar
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class StrategyModule(StrategyBase):
def _get_next_task_lockstep(self, hosts, iterator):
'''
Returns a list of (host, task) tuples, where the task may
be a noop task to keep the iterator in lock step across
all hosts.
'''
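# Illustrative sketch of the lockstep behaviour described above (host and task
# names are hypothetical): if hostA is still in ITERATING_TASKS while hostB has
# already finished that state, this method returns something like
#   [(hostA, <next real task>), (hostB, <noop meta task>)]
# so every host advances through the same block/state together.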
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
host_tasks = {}
display.debug("building list of next tasks for hosts")
for host in hosts:
host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
display.debug("done building task lists")
num_setups = 0
num_tasks = 0
num_rescue = 0
num_always = 0
lowest_cur_block = len(iterator._blocks)
display.debug("counting tasks in each state of execution")
for (k, v) in iteritems(host_tasks):
if v is None:
continue
(s, t) = v
if t is None:
continue
if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
lowest_cur_block = s.cur_block
if s.run_state == PlayIterator.ITERATING_SETUP:
num_setups += 1
elif s.run_state == PlayIterator.ITERATING_TASKS:
num_tasks += 1
elif s.run_state == PlayIterator.ITERATING_RESCUE:
num_rescue += 1
elif s.run_state == PlayIterator.ITERATING_ALWAYS:
num_always += 1
display.debug("done counting tasks in each state of execution")
def _advance_selected_hosts(hosts, cur_block, cur_state):
'''
This helper returns the task for all hosts in the requested
state, otherwise they get a noop dummy task. This also advances
the state of the host, since the given states are determined
while using peek=True.
'''
# we return the values in the order they were originally
# specified in the given hosts array
rvals = []
display.debug("starting to advance hosts")
for host in hosts:
host_state_task = host_tasks[host.name]
if host_state_task is None:
continue
(s, t) = host_state_task
if t is None:
continue
if s.run_state == cur_state and s.cur_block == cur_block:
new_t = iterator.get_next_task_for_host(host)
rvals.append((host, t))
else:
rvals.append((host, noop_task))
display.debug("done advancing hosts to next task")
return rvals
# if any hosts are in ITERATING_SETUP, return the setup task
# while all other hosts get a noop
if num_setups:
display.debug("advancing hosts in ITERATING_SETUP")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)
# if any hosts are in ITERATING_TASKS, return the next normal
# task for these hosts, while all other hosts get a noop
if num_tasks:
display.debug("advancing hosts in ITERATING_TASKS")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)
# if any hosts are in ITERATING_RESCUE, return the next rescue
# task for these hosts, while all other hosts get a noop
if num_rescue:
display.debug("advancing hosts in ITERATING_RESCUE")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)
# if any hosts are in ITERATING_ALWAYS, return the next always
# task for these hosts, while all other hosts get a noop
if num_always:
display.debug("advancing hosts in ITERATING_ALWAYS")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)
# at this point, everything must be ITERATING_COMPLETE, so we
# return None for all hosts in the list
display.debug("all hosts are done, so returning None's for all hosts")
return [(host, None) for host in hosts]
def run(self, iterator, play_context):
'''
The linear strategy is simple - get the next task and queue
it for all hosts, then wait for the queue to drain before
moving on to the next task
'''
# iterate over each task, while there is one left to run
result = True
work_to_do = True
while work_to_do and not self._tqm._terminated:
try:
self._display.debug("getting the remaining hosts for this loop")
hosts_left = self._inventory.get_hosts(iterator._play.hosts)
self._display.debug("done getting the remaining hosts for this loop")
# queue up this task for each host in the inventory
callback_sent = False
work_to_do = False
host_results = []
host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
# skip control
skip_rest = False
choose_step = True
for (host, task) in host_tasks:
if not task:
continue
run_once = False
work_to_do = True
# test to see if the task across all hosts points to an action plugin which
# sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
# will only send this task to the first host in the list.
try:
action = action_loader.get(task.action, class_only=True)
if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
run_once = True
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
pass
# check to see if this task should be skipped, due to it being a member of a
# role which has already run (and whether that role allows duplicate execution)
if task._role and task._role.has_run(host):
# If there is no metadata, the default behavior is to not allow duplicates,
# if there is metadata, check to see if the allow_duplicates flag was set to true
if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
self._display.debug("'%s' skipped because role has already run" % task)
continue
if task.action == 'meta':
self._execute_meta(task, play_context, iterator)
else:
# handle step if needed, skip meta actions as they are used internally
if self._step and choose_step:
if self._take_step(task):
choose_step = False
else:
skip_rest = True
break
self._display.debug("getting variables")
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
templar = Templar(loader=self._loader, variables=task_vars)
self._display.debug("done getting variables")
if not callback_sent:
display.debug("sending task start callback, copying the task so we can template it temporarily")
saved_name = task.name
display.debug("done copying, going to template now")
try:
task.name = unicode(templar.template(task.name, fail_on_undefined=False))
display.debug("done templating")
except:
# just ignore any errors during task name templating,
# we don't care if it just shows the raw name
display.debug("templating failed for some reason")
pass
display.debug("here goes the callback...")
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
task.name = saved_name
callback_sent = True
display.debug("sending task start callback")
self._blocked_hosts[host.get_name()] = True
self._queue_task(host, task, task_vars, play_context)
results = self._process_pending_results(iterator)
host_results.extend(results)
# if we're bypassing the host loop, break out now
if run_once:
break
# go to next host/task group
if skip_rest:
continue
self._display.debug("done queuing things up, now waiting for results queue to drain")
results = self._wait_on_pending_results(iterator)
host_results.extend(results)
if not work_to_do and len(iterator.get_failed_hosts()) > 0:
self._display.debug("out of hosts to run on")
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
break
try:
included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
except AnsibleError as e:
return False
if len(included_files) > 0:
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
all_blocks = dict((host, []) for host in hosts_left)
for included_file in included_files:
# included hosts get the task list while those excluded get an equal-length
# list of noop tasks, to make sure that they continue running in lock-step
try:
new_blocks = self._load_included_file(included_file, iterator=iterator)
except AnsibleError as e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
self._display.warning(str(e))
continue
for new_block in new_blocks:
noop_block = Block(parent_block=task._block)
noop_block.block = [noop_task for t in new_block.block]
noop_block.always = [noop_task for t in new_block.always]
noop_block.rescue = [noop_task for t in new_block.rescue]
for host in hosts_left:
if host in included_file._hosts:
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
final_block = new_block.filter_tagged_tasks(play_context, task_vars)
all_blocks[host].append(final_block)
else:
all_blocks[host].append(noop_block)
for host in hosts_left:
iterator.add_tasks(host, all_blocks[host])
self._display.debug("results queue empty")
except (IOError, EOFError) as e:
self._display.debug("got IOError/EOFError in task loop: %s" % e)
# most likely an abort, return failed
return False
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
return super(StrategyModule, self).run(iterator, play_context, result)
| gpl-3.0 | 7,524,632,294,346,595,000 | 45.209677 | 179 | 0.548063 | false |
The-Compiler/qutebrowser | tests/helpers/messagemock.py | 1 | 2576 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2020 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""pytest helper to monkeypatch the message module."""
import logging
import attr
import pytest
from qutebrowser.utils import usertypes, message
@attr.s
class Message:
"""Information about a shown message."""
level = attr.ib()
text = attr.ib()
class MessageMock:
"""Helper object for message_mock.
Attributes:
Message: An object representing a message.
messages: A list of Message objects.
"""
def __init__(self):
self.messages = []
def _record_message(self, level, text):
log_levels = {
usertypes.MessageLevel.error: logging.ERROR,
usertypes.MessageLevel.info: logging.INFO,
usertypes.MessageLevel.warning: logging.WARNING,
}
log_level = log_levels[level]
logging.getLogger('messagemock').log(log_level, text)
self.messages.append(Message(level, text))
def getmsg(self, level=None):
"""Get the only message in self.messages.
Raises ValueError if there are multiple or no messages.
Args:
level: The message level to check against, or None.
"""
assert len(self.messages) == 1
msg = self.messages[0]
if level is not None:
assert msg.level == level
return msg
def patch(self):
"""Start recording messages."""
message.global_bridge.show_message.connect(self._record_message)
message.global_bridge._connected = True
def unpatch(self):
"""Stop recording messages."""
message.global_bridge.show_message.disconnect(self._record_message)
@pytest.fixture
def message_mock():
"""Fixture to get a MessageMock."""
mmock = MessageMock()
mmock.patch()
yield mmock
mmock.unpatch()
| gpl-3.0 | 6,713,423,093,732,650,000 | 27 | 75 | 0.667314 | false |
mholgatem/GPIOnext | config/menus.py | 1 | 4183 | import time
from config.constants import *
from config import SQL
from cursesmenu import *
from cursesmenu.items import *
import curses
'''
---------------------------------------------------------
This script handles menu navigation
RETURNS: dictionary containing device name,
number of buttons, number of axes
---------------------------------------------------------
'''
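# Rough sketch of the dictionary shape returned by the menu flow below
# (values are hypothetical, for illustration only):
#   {'name': 'Keyboard', 'axisCount': 0, 'buttons': [<entries picked from KEY_LIST/BUTTON_LIST>]}
# The navigation helpers may instead return GOTO_MAIN (defined below) to jump
# back to the main menu, or None when the user exits.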
GOTO_MAIN = -999
def close():
if CursesMenu.stdscr != None:
CursesMenu().exit()
def clearPreviousMenu():
# clear any previous menus
if CursesMenu.stdscr != None:
CursesMenu.stdscr.erase()
def showMainMenu():
global currentDevice
clearPreviousMenu()
currentDevice = {'name': None,
'axisCount': 0,
'buttons': 0}
options = DEVICE_LIST + ['Clear Device']
choice = SelectionMenu.get_selection(
strings = options,
title = 'GPIOnext Config',
subtitle = 'Which virtual device do you want to CONFIGURE?'
)
try:
currentDevice['name'] = options [ choice ]
except IndexError: # user selected 'Exit'
return None
if currentDevice['name'] == 'Clear Device':
return clearDevice()
elif currentDevice['name']== 'Keyboard':
title = 'Select the keys that you want to assign'
return selectFromList( KEY_LIST, title )
elif currentDevice['name'] == 'Commands':
return currentDevice
else:
return getJoyAxisCount()
def clearDevice():
clearPreviousMenu()
options = DEVICE_LIST + ['← Return to Main Menu']
choice = SelectionMenu.get_selection(
strings = options,
title = 'CLEAR DEVICE',
subtitle = 'Remove configs for which device?',
exit_option = False
)
currentDevice['name'] = options[choice]
if currentDevice['name'] == '← Return to Main Menu':
return GOTO_MAIN
else:
clearPreviousMenu()
print( 'Deleting config files for {0}...'.format( currentDevice['name'] ))
SQL.deleteDevice( currentDevice['name'] )
time.sleep(1)
return clearDevice()
def getJoyAxisCount( ):
global currentDevice
clearPreviousMenu()
axisList = ['0','1','2','3','4','← Return to Main Menu']
dpadCount = SelectionMenu.get_selection(
strings = axisList,
title = 'Configuring {0}'.format( currentDevice['name'] ),
subtitle = 'How many Dpads/Joysticks does this controller have?',
exit_option = False
)
currentDevice['axisCount'] = dpadCount
# if Return to Main Menu
if dpadCount == 5:
return GOTO_MAIN
else:
title = 'Select the buttons that you want to assign'
return selectFromList( BUTTON_LIST, title)
def editCommandButton():
global currentDevice
cmdList = SQL.getDeviceRaw( 'Commands' )
entries = [ '• Edit Command: {0}'.format( x['name'] ) for x in cmdList ]
entries.insert( 0, '• Add New Command' )
entries.append( '← Return to Main Menu' )
edit = 2
while edit == 2:
clearPreviousMenu()
choice = SelectionMenu.get_selection(
strings = entries,
title = 'Configuring {0}'.format( currentDevice['name'] ),
subtitle = 'Select a command to edit',
exit_option = False
)
if choice == 0:
return ( 'EDIT', {'command':'', 'pins': None, 'id': None, 'device': None, 'name': '', 'type':'COMMAND' } )
elif choice == len( entries ) - 1:
return GOTO_MAIN
clearPreviousMenu()
edit = SelectionMenu.get_selection(
strings = ['Edit', 'Delete', '← Go Back' ],
title = 'Configuring {0}'.format( cmdList[ choice - 1 ]['name'] ),
subtitle = 'Edit or Delete this command?',
exit_option = False
)
edit = 'EDIT' if edit == 0 else 'DELETE'
return ( edit, cmdList[ choice - 1 ] )
def selectFromList( currentList, title ):
global currentDevice
buttonNames = [ b[0] for b in currentList ]
buttonNames.append( '← Return to Main Menu' )
# returns list of buttons to configure
choice = MultiSelect.get_selection(
strings = buttonNames,
title = title,
exit_option = False
)
# return to main menu
if choice == [-1]:
return GOTO_MAIN
chosenButtons = [b for b in currentList if b[0] in choice]
currentDevice['buttons'] = chosenButtons
return currentDevice
| mit | -1,539,677,668,419,638,800 | 27.346939 | 109 | 0.63067 | false |
Statoil/libres | python/tests/res/enkf/data/test_custom_kw.py | 1 | 4935 | import os
import pytest
from res.enkf.enums import EnkfRunType
from res.enkf import ErtRunContext
from res.enkf.config import CustomKWConfig
from res.enkf.data import CustomKW
from res.enkf.enkf_simulation_runner import EnkfSimulationRunner
from res.enkf.export import custom_kw_collector
from res.enkf.export.custom_kw_collector import CustomKWCollector
from res.test.ert_test_context import ErtTestContext
from tests import ResTest
from ecl.util.test.test_area import TestAreaContext
from ecl.util.util import StringList
from ecl.util.util import BoolVector
from tests.utils import tmpdir
class CustomKWTest(ResTest):
def createResultFile(self, filename, data):
with open(filename, "w") as output_file:
for key in data:
output_file.write("%s %s\n" % (key, data[key]))
def test_custom_kw_creation(self):
data = {"VALUE_1": 2345.234,
"VALUE_2": 0.001234,
"VALUE_3": "string_1",
"VALUE_4": "string_2"}
with TestAreaContext("python/enkf/data/custom_kw_creation") as test_area:
self.createResultFile("result_file", data)
custom_kw_config = CustomKWConfig("CUSTOM_KW", "result_file")
self.assertEqual(len(custom_kw_config), 0)
custom_kw = CustomKW(custom_kw_config)
custom_kw.fload("result_file")
self.assertEqual(len(custom_kw_config), 4)
for key in data:
index = custom_kw_config.indexOfKey(key)
self.assertEqual(data[key], custom_kw[key])
with self.assertRaises(KeyError):
value = custom_kw["VALUE_5"]
def test_custom_kw_config_data_is_null(self):
data_1 = {"VALUE_1": 123453.3,
"VALUE_2": 0.234234}
data_2 = {"VALUE_1": 965689,
"VALUE_3": 1.1222}
with TestAreaContext("python/enkf/data/custom_kw_null_element") as test_area:
self.createResultFile("result_file_1", data_1)
self.createResultFile("result_file_2", data_2)
custom_kw_config = CustomKWConfig("CUSTOM_KW", "result_file")
custom_kw_1 = CustomKW(custom_kw_config)
custom_kw_1.fload("result_file_1")
custom_kw_2 = CustomKW(custom_kw_config)
custom_kw_2.fload("result_file_2")
index_1 = custom_kw_config.indexOfKey("VALUE_1")
index_2 = custom_kw_config.indexOfKey("VALUE_2")
self.assertEqual(custom_kw_1["VALUE_1"], data_1["VALUE_1"])
self.assertEqual(custom_kw_2["VALUE_1"], data_2["VALUE_1"])
self.assertIsNone(custom_kw_2["VALUE_2"])
self.assertFalse( "VALUE_3" in custom_kw_config )
@tmpdir()
def test_simulated_custom_kw(self):
config = self.createTestPath("local/custom_kw/mini_config")
with ErtTestContext("python/enkf/data/custom_kw_simulated", config) as context:
ert = context.getErt()
ensemble_config = ert.ensembleConfig()
self.assertTrue("AGGREGATED" in ensemble_config)
config = ensemble_config.getNode("AGGREGATED").getCustomKeywordModelConfig()
self.assertEqual(len(config.getKeys()), 0)
simulation_runner = EnkfSimulationRunner(ert)
job_queue = ert.get_queue_config().create_job_queue()
iteration_count = 0
active = BoolVector(default_value = True, initial_size = 4)
subst_list = ert.getDataKW( )
runpath_fmt = ert.getModelConfig( ).getRunpathFormat( )
fs_manager = ert.getEnkfFsManager( )
fs = fs_manager.getFileSystem("fs")
jobname_fmt = ert.getModelConfig( ).getJobnameFormat( )
run_context = ErtRunContext( EnkfRunType.ENSEMBLE_EXPERIMENT , fs, None , active , runpath_fmt, jobname_fmt, subst_list , iteration_count)
simulation_runner.createRunPath( run_context )
simulation_runner.runEnsembleExperiment(job_queue, run_context)
config = ensemble_config.getNode("AGGREGATED").getCustomKeywordModelConfig()
self.assertEqual(len(config.getKeys()), 4)
self.assertItemsEqual(config.getKeys(), ["PERLIN_1", "PERLIN_2", "PERLIN_3", "STATE"])
def test_custom_kw_set_values(self):
definition = {
"STRING": str,
"FLOAT": float,
"INT": float
}
ckwc = CustomKWConfig("Test", None, definition=definition)
ckw = CustomKW(ckwc)
with self.assertRaises(KeyError):
ckw["ANOTHER_STRING"] = "another string"
ckw["STRING"] = "string"
ckw["FLOAT"] = 3.1415
ckw["INT"] = 1
self.assertEqual(ckw["STRING"], "string")
self.assertEqual(ckw["FLOAT"], 3.1415)
self.assertEqual(ckw["INT"], 1)
| gpl-3.0 | 4,995,426,904,428,600,000 | 33.51049 | 150 | 0.604458 | false |
kashefy/nideep | nideep/iow/test_read_img.py | 1 | 5461 | '''
Created on Oct 30, 2015
@author: kashefy
'''
from nose.tools import assert_equal, assert_almost_equals
from mock import patch
import os
import tempfile
import shutil
import numpy as np
import cv2
import read_img as r
class TestReadImage:
@classmethod
def setup_class(self):
self.dir_tmp = tempfile.mkdtemp()
self.img1 = np.array([[[ 1, 2, 3],
[ 4, 5, 6]
],
[[ 7, 8, 9],
[10, 11, 12]
],
[[13, 14, 15],
[16, 17, 18],
],
[[19, 20, 21],
[22, 23, 24]
]
])
self.path_img1 = os.path.join(self.dir_tmp, "a.png")
cv2.imwrite(self.path_img1, self.img1)
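# The 4x2x3 (rows x cols x channels) array written above is expected to come
# back channel-first, i.e. with shape (3, 4, 2), from the readers under test
# (see the *_shape tests below).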
@classmethod
def teardown_class(self):
shutil.rmtree(self.dir_tmp)
def test_read_img_cv2_shape(self):
img = r.read_img_cv2(self.path_img1)
assert_equal(img.shape, (3, 4, 2))
def test_read_img_cv2_pixels(self):
img = r.read_img_cv2(self.path_img1)
for ch in range(3):
for row in range(4):
for col in range(2):
assert_equal(img[ch][row][col], self.img1[row][col][ch])
def test_read_img_cv2_dtype(self):
img = r.read_img_cv2(self.path_img1)
assert_equal(img.dtype, np.dtype('uint8'))
def test_read_img_cv2_subtract_mean(self):
m = np.array((1., 2. , 3.))
img = r.read_img_cv2(self.path_img1, mean=m)
for ch in range(3):
for row in range(4):
for col in range(2):
assert_equal(img[ch][row][col], self.img1[row][col][ch] - m[ch])
def test_read_img_PIL_shape(self):
assert_equal(r.read_img_PIL(self.path_img1).shape, (3, 4, 2))
def test_read_img_PIL_pixels(self):
img = r.read_img_PIL(self.path_img1)
for ch in range(3):
for row in range(4):
for col in range(2):
assert_equal(img[ch][row][col], self.img1[row][col][ch])
def test_read_img_PIL_subtract_mean(self):
m = np.array((1., 2. , 3.))
img = r.read_img_PIL(self.path_img1, mean=m)
for ch in range(3):
for row in range(4):
for col in range(2):
assert_equal(img[ch][row][col], self.img1[row][col][ch] - m[ch])
@patch('nideep.iow.read_img.caffe.io')
def test_read_img_caf_shape(self, mock_io):
mock_io.load_image.return_value = \
np.array([[[0.01176471, 0.00784314, 0.00392157],
[0.02352941, 0.01960784, 0.01568628]
],
[[0.03529412, 0.03137255, 0.02745098],
[0.04705882, 0.04313726, 0.03921569],
],
[[0.05882353, 0.05490196, 0.05098039],
[0.07058824, 0.06666667, 0.0627451 ]
],
[[0.08235294, 0.07843138, 0.07450981],
[0.09411765, 0.09019608, 0.08627451]
]
])
assert_equal(r.read_img_caf(self.path_img1).shape, (3, 4, 2))
@patch('nideep.iow.read_img.caffe.io')
def test_read_img_caf_pixels(self, mock_io):
mock_io.load_image.return_value = \
np.array([[[0.01176471, 0.00784314, 0.00392157],
[0.02352941, 0.01960784, 0.01568628]
],
[[0.03529412, 0.03137255, 0.02745098],
[0.04705882, 0.04313726, 0.03921569],
],
[[0.05882353, 0.05490196, 0.05098039],
[0.07058824, 0.06666667, 0.0627451 ]
],
[[0.08235294, 0.07843138, 0.07450981],
[0.09411765, 0.09019608, 0.08627451]
]
])
img = r.read_img_caf(self.path_img1)
for ch in range(3):
for row in range(4):
for col in range(2):
assert_almost_equals(img[ch][row][col], self.img1[row][col][ch], places=5)
@patch('nideep.iow.read_img.caffe.io')
def test_read_img_caf_subtract_mean(self, mock_io):
mock_io.load_image.return_value = \
np.array([[[0.01176471, 0.00784314, 0.00392157],
[0.02352941, 0.01960784, 0.01568628]
],
[[0.03529412, 0.03137255, 0.02745098],
[0.04705882, 0.04313726, 0.03921569],
],
[[0.05882353, 0.05490196, 0.05098039],
[0.07058824, 0.06666667, 0.0627451 ]
],
[[0.08235294, 0.07843138, 0.07450981],
[0.09411765, 0.09019608, 0.08627451]
]
])
m = np.array((1., 2. , 3.))
img = r.read_img_caf(self.path_img1, mean=m)
for ch in range(3):
for row in range(4):
for col in range(2):
assert_almost_equals(img[ch][row][col], self.img1[row][col][ch] - m[ch], places=5)
| bsd-2-clause | 3,508,980,782,283,328,000 | 32.29878 | 102 | 0.449551 | false |
mahim97/zulip | zerver/management/commands/set_default_streams.py | 8 | 1912 |
import sys
from argparse import ArgumentParser, RawTextHelpFormatter
from typing import Any, Dict, Text
from zerver.lib.actions import set_default_streams
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """Set default streams for a realm
Users created under this realm will start out with these streams. This
command is not additive: if you re-run it on a realm with a different
set of default streams, those will be the new complete set of default
streams.
For example:
./manage.py set_default_streams --realm=foo --streams=foo,bar,baz
./manage.py set_default_streams --realm=foo --streams="foo,bar,baz with space"
./manage.py set_default_streams --realm=foo --streams=
"""
# Fix support for multi-line usage
def create_parser(self, *args: Any, **kwargs: Any) -> ArgumentParser:
parser = super().create_parser(*args, **kwargs)
parser.formatter_class = RawTextHelpFormatter
return parser
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('-s', '--streams',
dest='streams',
type=str,
help='A comma-separated list of stream names.')
self.add_realm_args(parser, True)
def handle(self, **options: str) -> None:
realm = self.get_realm(options)
if options["streams"] is None:
print("Please provide a default set of streams (which can be empty,\
with `--streams=`).", file=sys.stderr)
exit(1)
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
stream_dict = {
stream.strip(): {"description": stream.strip(), "invite_only": False}
for stream in options["streams"].split(",")
} # type: Dict[Text, Dict[Text, Any]]
set_default_streams(realm, stream_dict)
| apache-2.0 | -3,494,797,348,858,399,000 | 36.490196 | 81 | 0.648536 | false |
endlessm/chromium-browser | third_party/shaderc/src/glslc/test/option_dash_M.py | 3 | 33038 | # Copyright 2015 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import expect
import os.path
import sys
from environment import File, Directory
from glslc_test_framework import inside_glslc_testsuite
from placeholder import FileShader
from glslc_test_framework import GlslCTest
MINIMAL_SHADER = '#version 140\nvoid main() {}'
EMPTY_SHADER_IN_CURDIR = Directory('.', [File('shader.vert', MINIMAL_SHADER)])
EMPTY_SHADER_IN_SUBDIR = Directory('subdir',
[File('shader.vert', MINIMAL_SHADER)])
def process_test_specified_dependency_info_rules(test_specified_rules):
"""A helper function to process the expected dependency info rules
specified in tests before checking the actual dependency rule output.
This is required because the filenames and paths of temporary files created
through FileShader are unknown at the time the expected dependency info rules
are declared.
Note this function processes the given rule list in-place.
"""
for rule in test_specified_rules:
# If the 'target' value is not a hard-coded file name but a
# FileShader, we need its full path, append extension to it and
# strip the directory component from it to get the complete target
# name.
if isinstance(rule['target'], FileShader):
rule['target'] = rule['target'].filename
if 'target_extension' in rule:
if rule['target_extension'] is not None:
rule['target'] = rule['target'] + rule['target_extension']
rule.pop('target_extension')
rule['target'] = os.path.basename(rule['target'])
# The dependency set may have FileShader too, we need to replace
# them with their absolute paths.
dependent_file_name_set = set()
for dependent_file in rule['dependency']:
if isinstance(dependent_file, FileShader):
dependent_file_name_set.add(dependent_file.filename)
else:
dependent_file_name_set.add(dependent_file)
rule['dependency'] = dependent_file_name_set
def parse_text_rules(text_lines):
""" A helper function to read text lines and construct and returns a list of
dependency rules which can be used for comparison.
The list is built with the text order. Each rule is described in the
following way:
{'target': <target name>, 'dependency': <set of dependent filenames>}
"""
rules = []
for line in text_lines:
if line.strip() == "":
continue
rule = {'target': line.split(': ')[0].strip(),
'dependency': set(line.split(': ')[-1].strip().split(' '))}
rules.append(rule)
return rules
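# Illustrative sketch (not part of the original test data): a line such as
# "shader.vert.spv: shader.vert util.glsl" would be parsed by parse_text_rules()
# into [{'target': 'shader.vert.spv', 'dependency': {'shader.vert', 'util.glsl'}}],
# the same rule shape used by the *_expected lists in the test classes below.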
class DependencyInfoStdoutMatch(GlslCTest):
"""Mixin class for tests that can expect dependency info in Stdout.
To mix in this class, the subclass needs to provide
dependency_rules_expected as a list of dictionaries, each dictionary
describes one expected make rule for a target file. A expected rule should
be specified in the following way:
rule = {'target': <target name>,
'target_extension': <.spv, .spvasm or None>,
'dependency': <dependent file names>}
The 'target_extension' field is optional; its value will be appended to
'target' to get the complete target name.
And the list 'dependency_rules_expected' is a list of such rules and the
order of the rules does matter.
"""
def check_stdout_dependency_info(self, status):
if not status.stdout:
return False, 'Expect dependency rules on stdout'
if sys.version_info[0] == 2:
rules = parse_text_rules(status.stdout.decode('utf-8').split('\n'))
elif sys.version_info[0] == 3:
rules = parse_text_rules(str(status.stdout,
encoding='utf-8',
errors='ignore').split('\n'))
process_test_specified_dependency_info_rules(
self.dependency_rules_expected)
if self.dependency_rules_expected != rules:
return False, ('Incorrect dependency info:\n{ac_rules}\n'
'Expected:\n{ex_rules}\n'
'Stdout output:\n{ac_stdout}\n'.format(
ac_rules=rules,
ex_rules=self.dependency_rules_expected,
ac_stdout=status.stdout))
return True, ''
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputRelativePathNoInclude(DependencyInfoStdoutMatch):
"""Tests -M with single input file which doesn't contain #include and is
represented in relative path.
e.g. glslc -M shader.vert
=> shader.vert.spv: shader.vert
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-M', 'shader.vert']
dependency_rules_expected = [{'target': "shader.vert.spv",
'dependency': {"shader.vert"}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputAbsolutePathNoInclude(DependencyInfoStdoutMatch):
"""Tests -M with single input file which doesn't contain #include and is
represented in absolute path.
e.g. glslc -M /usr/local/shader.vert
=> shader.vert.spv: /usr/local/shader.vert
"""
shader = FileShader(MINIMAL_SHADER, '.vert')
glslc_args = ['-M', shader]
dependency_rules_expected = [{'target': shader,
'target_extension': '.spv',
'dependency': {shader}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputRelativePathWithInclude(
DependencyInfoStdoutMatch):
"""Tests -M with single input file which does contain #include and is
represented in relative path.
e.g. glslc -M a.vert
=> a.vert.spv: a.vert b.vert
"""
environment = Directory('.', [
File('a.vert', '#version 140\n#include "b.vert"\nvoid main(){}\n'),
File('b.vert', 'void foo(){}\n'),
])
glslc_args = ['-M', 'a.vert']
dependency_rules_expected = [{'target': 'a.vert.spv',
'dependency': {'a.vert', 'b.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputRelativePathWithIncludeSubdir(
DependencyInfoStdoutMatch):
"""Tests -M with single input file which does #include another file in a
subdirectory of current directory and is represented in relative path.
e.g. glslc -M a.vert
=> a.vert.spv: a.vert include/b.vert
"""
environment = Directory('.', [
File('a.vert', ('#version 140\n#include "include/b.vert"'
'\nvoid main(){}\n')),
Directory('include', [File('b.vert', 'void foo(){}\n')]),
])
glslc_args = ['-M', 'a.vert']
dependency_rules_expected = [{'target': 'a.vert.spv',
'dependency': {'a.vert', 'include/b.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputRelativePathWithDashI(DependencyInfoStdoutMatch):
"""Tests -M with single input file works with -I option. The #include
directive does not specify 'include/' for the file to be include.
e.g. glslc -M a.vert -I include
=> a.vert.spv: a.vert include/b.vert
"""
environment = Directory('.', [
File('a.vert', ('#version 140\n#include "b.vert"'
'\nvoid main(){}\n')),
Directory('include', [File('b.vert', 'void foo(){}\n')]),
])
glslc_args = ['-M', 'a.vert', '-I', 'include']
dependency_rules_expected = [{'target': 'a.vert.spv',
'dependency': {'a.vert', 'include/b.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputRelativePathWithNestedInclude(
DependencyInfoStdoutMatch):
"""Tests -M with single input file under nested #include case. The input file
is represented in relative path.
e.g. glslc -M a.vert
=> a.vert.spv: a.vert b.vert c.vert
"""
environment = Directory('.', [
File('a.vert', '#version 140\n#include "b.vert"\nvoid main(){}\n'),
File('b.vert', 'void foo(){}\n#include "c.vert"\n'),
File('c.vert', 'void bar(){}\n'),
])
glslc_args = ['-M', 'a.vert']
dependency_rules_expected = [{'target': 'a.vert.spv',
'dependency':
{'a.vert', 'b.vert', 'c.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMMultipleInputRelativePathNoInclude(
DependencyInfoStdoutMatch):
"""Tests -M with multiple input file which don't contain #include and are
represented in relative paths.
e.g. glslc -M a.vert b.vert
=> a.vert.spv: a.vert
b.vert.spv: b.vert
"""
environment = Directory('.', [
File('a.vert', MINIMAL_SHADER),
File('b.vert', MINIMAL_SHADER),
])
glslc_args = ['-M', 'a.vert', 'b.vert']
dependency_rules_expected = [{'target': 'a.vert.spv',
'dependency': {'a.vert'}},
{'target': 'b.vert.spv',
'dependency': {'b.vert'}}, ]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMMultipleInputAbsolutePathNoInclude(
DependencyInfoStdoutMatch):
"""Tests -M with single input file which doesn't contain #include and is
represented in absolute path.
e.g. glslc -M /usr/local/a.vert /usr/local/b.vert
=> a.vert.spv: /usr/local/a.vert
b.vert.spv: /usr/local/b.vert
"""
shader_a = FileShader(MINIMAL_SHADER, '.vert')
shader_b = FileShader(MINIMAL_SHADER, '.vert')
glslc_args = ['-M', shader_a, shader_b]
dependency_rules_expected = [{'target': shader_a,
'target_extension': '.spv',
'dependency': {shader_a}},
{'target': shader_b,
'target_extension': '.spv',
'dependency': {shader_b}}, ]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDashCapMT(DependencyInfoStdoutMatch):
"""Tests -MT works with -M. User can specify the target object name in the
generated dependency info.
e.g. glslc -M shader.vert -MT target
=> target: shader.vert
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-M', 'shader.vert', '-MT', 'target']
dependency_rules_expected = [{'target': 'target',
'dependency': {'shader.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMInputAbsolutePathWithInclude(DependencyInfoStdoutMatch):
"""Tests -M have included files represented in absolute paths when the input
file is represented in absolute path.
E.g. Assume a.vert has '#include "b.vert"'
glslc -M /usr/local/a.vert
=> a.vert.spv: /usr/local/a.vert /usr/local/b.vert
"""
environment = Directory('.', [File('b.vert', 'void foo(){}\n')])
shader_main = FileShader(
'#version 140\n#include "b.vert"\nvoid main(){}\n', '.vert')
glslc_args = ['-M', shader_main]
dependency_rules_expected = [{
'target': shader_main,
'target_extension': '.spv',
'dependency': {shader_main}
# The dependency here is not complete. we can not get the absolute path
# of b.vert here. It will be added in check_stdout_dependency_info()
}]
def check_stdout_dependency_info(self, status):
# Add the absolute path of b.vert to the dependency set
self.dependency_rules_expected[0]['dependency'].add(os.path.dirname(
self.shader_main.filename) + '/b.vert')
return DependencyInfoStdoutMatch.check_stdout_dependency_info(self,
status)
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputAbsolutePathWithIncludeSubdir(
DependencyInfoStdoutMatch):
"""Tests -M with single input file which does #include another file in a
subdirectory of current directory and is represented in absolute path.
e.g. glslc -M /usr/local/a.vert
=> a.vert.spv: /usr/local/a.vert /usr/local/include/b.vert
"""
environment = Directory('.', [
Directory('include', [File('b.vert', 'void foo(){}\n')]),
])
shader_main = FileShader('#version 140\n#include "include/b.vert"\n',
'.vert')
glslc_args = ['-M', shader_main]
dependency_rules_expected = [{
'target': shader_main,
'target_extension': '.spv',
'dependency': {shader_main}
# The dependency here is not complete. we can not get the absolute
# path of include/b.vert here. It will be added in
# check_stdout_dependency_info()
}]
def check_stdout_dependency_info(self, status):
# Add the absolute path of include/b.vert to the dependency set
self.dependency_rules_expected[0]['dependency'].add(os.path.dirname(
self.shader_main.filename) + '/include/b.vert')
return DependencyInfoStdoutMatch.check_stdout_dependency_info(self,
status)
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMOverridesOtherModes(DependencyInfoStdoutMatch):
"""Tests -M overrides other compiler mode options, includeing -E, -c and -S.
"""
environment = Directory('.', [
File('a.vert', MINIMAL_SHADER),
File('b.vert', MINIMAL_SHADER),
])
glslc_args = ['-M', '-E', '-c', '-S', 'a.vert', 'b.vert']
dependency_rules_expected = [{'target': 'a.vert.spv',
'dependency': {'a.vert'}},
{'target': 'b.vert.spv',
'dependency': {'b.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMMEquivalentToCapM(DependencyInfoStdoutMatch):
"""Tests that -MM behaves as -M.
e.g. glslc -MM shader.vert
=> shader.vert.spv: shader.vert
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-MM', 'shader.vert']
dependency_rules_expected = [{'target': 'shader.vert.spv',
'dependency': {'shader.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMImpliesDashCapE(DependencyInfoStdoutMatch,
expect.NoOutputOnStderr):
"""Tests that -M implies -E, a .glsl file without an explict stage should
not generate an error.
e.g. glslc -M shader.glsl
=> shader.spv: shader.glsl
<no error message should be generated>
"""
environment = Directory('.', [File('shader.glsl', MINIMAL_SHADER)])
glslc_args = ['-M', 'shader.glsl']
dependency_rules_expected = [{'target': 'shader.spv',
'dependency': {'shader.glsl'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMImpliesDashW(DependencyInfoStdoutMatch,
expect.NoOutputOnStderr):
"""Tests that -M implies -w, a deprecated attribute should not generate
warning message.
e.g. glslc -M shader.vert
=> shader.vert.spv: shader.vert
<no warning message should be generated>
"""
environment = Directory('.', [File(
'shader.vert', """#version 400
layout(location=0) attribute float x;
void main() {}""")])
glslc_args = ['-M', 'shader.vert']
dependency_rules_expected = [{'target': 'shader.vert.spv',
'dependency': {'shader.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMMImpliesDashCapE(DependencyInfoStdoutMatch,
expect.NoOutputOnStderr):
"""Tests that -M implies -E, a .glsl file without an explict stage should
not generate an error.
e.g. glslc -MM shader.glsl
=> shader.spv: shader.glsl
<no error message should be generated>
"""
environment = Directory('.', [File('shader.glsl', MINIMAL_SHADER)])
glslc_args = ['-MM', 'shader.glsl']
dependency_rules_expected = [{'target': 'shader.spv',
'dependency': {'shader.glsl'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMMImpliesDashW(DependencyInfoStdoutMatch,
expect.NoOutputOnStderr):
"""Tests that -MM implies -w, a deprecated attribute should not generate
warning message.
e.g. glslc -MM shader.vert
=> shader.vert.spv: shader.vert
<no warning message should be generated>
"""
environment = Directory('.', [File(
'shader.vert', """
#version 400
layout(location = 0) attribute float x;
void main() {}""")])
glslc_args = ['-MM', 'shader.vert']
dependency_rules_expected = [{'target': 'shader.vert.spv',
'dependency': {'shader.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMD(expect.ValidFileContents, expect.ValidNamedObjectFile):
"""Tests that -MD generates dependency info file and compilation output.
e.g. glslc -MD shader.vert
=> <a.spv: valid SPIR-V object file>
=> <shader.vert.spv.d: dependency info>
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-MD', 'shader.vert']
expected_object_filenames = ('a.spv', )
target_filename = 'shader.vert.spv.d'
expected_file_contents = ['shader.vert.spv: shader.vert\n']
class DependencyInfoFileMatch(GlslCTest):
"""Mixin class for tests that can expect dependency info files.
To mix in this class, subclasses need to provide dependency_info_filenames
and dependency_info_files_expected_contents which are two lists.
list dependency_info_filenames contains the dependency info file names and
list dependency_info_files_expected_contents contains the expected matching
dependency rules.
The item order of the two lists should match, which means:
dependency_info_files_expected_contents[i] should describe the
dependency rules saved in dependency_info_filenames[i]
    The content of each dependency info file is described in the same 'list of dict'
structure explained in class DependencyInfoStdoutMatch's doc string.
"""
def check_dependency_info_files(self, status):
dep_info_files = \
[os.path.join(status.directory,
f) for f in self.dependency_info_filenames]
for i, df in enumerate(dep_info_files):
if not os.path.isfile(df):
return False, 'Cannot find file: ' + df
try:
with open(df, 'r') as dff:
content = dff.read()
rules = parse_text_rules(content.split('\n'))
process_test_specified_dependency_info_rules(
self.dependency_info_files_expected_contents[i])
if self.dependency_info_files_expected_contents[
i] != rules:
                    return False, (
                        'Incorrect dependency info:\n{ac_rules}\n'
                        'Expected:\n{ex_rules}\n'
                        'Incorrect file output:\n{ac_out}\n'
                        'Incorrect dependency info file:\n{ac_file}\n'.format(
                            ac_rules=rules,
                            ex_rules=self.dependency_info_files_expected_contents[i],
                            ac_out=content,
                            ac_file=df))
except IOError:
return False, ('Could not open dependency info file ' + df +
' for reading')
return True, ''
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMWorksWithDashO(DependencyInfoFileMatch):
"""Tests -M works with -o option. When user specifies an output file name
with -o, the dependency info should be dumped to the user specified output
file.
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-M', 'shader.vert', '-o', 'dep_info']
dependency_info_filenames = ('dep_info', )
dependency_info_files_expected_contents = []
dependency_info_files_expected_contents.append(
[{'target': 'shader.vert.spv',
'dependency': {'shader.vert'}}])
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDMultipleFile(expect.ValidNamedObjectFile,
DependencyInfoFileMatch):
"""Tests that -MD generates dependency info file for multiple files.
e.g. glslc -MD a.vert b.vert -c
=> <a.vert.spv: valid SPIR-V object file>
=> <a.vert.spv.d: dependency info: "a.vert.spv: a.vert">
=> <b.vert.spv: valid SPIR-V object file>
=> <b.vert.spv.d: dependency info: "b.vert.spv: b.vert">
"""
environment = Directory('.', [File('a.vert', MINIMAL_SHADER),
File('b.vert', MINIMAL_SHADER)])
glslc_args = ['-MD', 'a.vert', 'b.vert', '-c']
expected_object_filenames = ('a.vert.spv', 'b.vert.spv', )
dependency_info_filenames = ['a.vert.spv.d', 'b.vert.spv.d']
dependency_info_files_expected_contents = []
dependency_info_files_expected_contents.append([{'target': 'a.vert.spv',
'dependency': {'a.vert'}}
])
dependency_info_files_expected_contents.append([{'target': 'b.vert.spv',
'dependency': {'b.vert'}}
])
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDMultipleFilePreprocessingOnlyMode(expect.StdoutMatch,
DependencyInfoFileMatch):
"""Tests that -MD generates dependency info file for multiple files in
preprocessing only mode.
e.g. glslc -MD a.vert b.vert -E
=> stdout: preprocess result of a.vert and b.vert
=> <a.vert.spv.d: dependency info: "a.vert.spv: a.vert">
=> <b.vert.spv.d: dependency info: "b.vert.spv: b.vert">
"""
environment = Directory('.', [File('a.vert', MINIMAL_SHADER),
File('b.vert', MINIMAL_SHADER)])
glslc_args = ['-MD', 'a.vert', 'b.vert', '-E']
dependency_info_filenames = ['a.vert.spv.d', 'b.vert.spv.d']
dependency_info_files_expected_contents = []
dependency_info_files_expected_contents.append([{'target': 'a.vert.spv',
'dependency': {'a.vert'}}
])
dependency_info_files_expected_contents.append([{'target': 'b.vert.spv',
'dependency': {'b.vert'}}
])
expected_stdout = ("#version 140\nvoid main(){ }\n"
"#version 140\nvoid main(){ }\n")
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDMultipleFileDisassemblyMode(expect.ValidNamedAssemblyFile,
DependencyInfoFileMatch):
"""Tests that -MD generates dependency info file for multiple files in
disassembly mode.
e.g. glslc -MD a.vert b.vert -S
=> <a.vert.spvasm: valid SPIR-V assembly file>
=> <a.vert.spvasm.d: dependency info: "a.vert.spvasm: a.vert">
=> <b.vert.spvasm: valid SPIR-V assembly file>
=> <b.vert.spvasm.d: dependency info: "b.vert.spvasm: b.vert">
"""
environment = Directory('.', [File('a.vert', MINIMAL_SHADER),
File('b.vert', MINIMAL_SHADER)])
glslc_args = ['-MD', 'a.vert', 'b.vert', '-S']
expected_assembly_filenames = ('a.vert.spvasm', 'b.vert.spvasm', )
dependency_info_filenames = ['a.vert.spvasm.d', 'b.vert.spvasm.d']
dependency_info_files_expected_contents = []
dependency_info_files_expected_contents.append([{'target': 'a.vert.spvasm',
'dependency': {'a.vert'}}
])
dependency_info_files_expected_contents.append([{'target': 'b.vert.spvasm',
'dependency': {'b.vert'}}
])
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMT(expect.ValidFileContents, expect.ValidNamedObjectFile):
"""Tests that -MT generates dependency info file with specified target label.
e.g. glslc -MD shader.vert -MT target_label
=> <a.spv: valid SPIR-V object file>
=> <shader.vert.spv.d: dependency info: "target_label: shader.vert">
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-MD', 'shader.vert', '-MT', 'target_label']
expected_object_filenames = ('a.spv', )
target_filename = 'shader.vert.spv.d'
expected_file_contents = ['target_label: shader.vert\n']
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMF(expect.ValidFileContents, expect.ValidNamedObjectFile):
"""Tests that -MF dumps dependency info into specified file.
e.g. glslc -MD shader.vert -MF dep_file
=> <a.spv: valid SPIR-V object file>
=> <dep_file: dependency info: "shader.vert.spv: shader.vert">
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-MD', 'shader.vert', '-MF', 'dep_file']
expected_object_filenames = ('a.spv', )
target_filename = 'dep_file'
expected_file_contents = ['shader.vert.spv: shader.vert\n']
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDSpecifyOutputFileName(expect.ValidFileContents,
expect.ValidNamedObjectFile):
"""Tests that -MD has the default dependency info file name and target
label correct when -o <output_file_name> appears in the command line.
The default dependency info file name and target label should be deduced
from the linking-disabled compilation output.
e.g. glslc -MD subdir/shader.vert -c -o output
=> <./output: valid SPIR-V object file>
=> <./output.d: dependency info: "output: shader.vert">
"""
environment = EMPTY_SHADER_IN_SUBDIR
glslc_args = ['-MD', 'subdir/shader.vert', '-c', '-o', 'output']
expected_object_filenames = ('output', )
target_filename = 'output.d'
expected_file_contents = ['output: subdir/shader.vert\n']
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDWithDashMFDashMTDashO(expect.ValidFileContents,
expect.ValidNamedObjectFile):
"""Tests that -MD, -MF, -MT and -o gernates dependency info file and
compilation output file correctly
e.g. glslc -MD subdir/shader.vert -c -o subdir/out -MF dep_info -MT label
=> <subdir/out: valid SPIR-V object file>
=> <dep_info: dependency info: "label: shader.vert">
"""
environment = EMPTY_SHADER_IN_SUBDIR
glslc_args = ['-MD', 'subdir/shader.vert', '-c', '-o', 'subdir/out', '-MF',
'dep_info', '-MT', 'label']
expected_object_filenames = ('subdir/out', )
target_filename = 'dep_info'
expected_file_contents = ['label: subdir/shader.vert\n']
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDWithDashMFDashMTDashODisassemblyMode(
expect.ValidFileContents, expect.ValidNamedAssemblyFile):
"""Tests that -MD, -MF, -MT and -o gernates dependency info file and
compilation output file correctly in disassembly mode
e.g. glslc -MD subdir/shader.vert -s -o subdir/out -MF dep_info -MT label
=> <subdir/out: valid SPIR-V object file>
=> <dep_info: dependency info: "label: shader.vert">
"""
environment = EMPTY_SHADER_IN_SUBDIR
glslc_args = ['-MD', 'subdir/shader.vert', '-S', '-o', 'subdir/out', '-MF',
'dep_info', '-MT', 'label']
expected_assembly_filenames = ('subdir/out', )
target_filename = 'dep_info'
expected_file_contents = ['label: subdir/shader.vert\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorSetBothDashCapMAndDashCapMD(expect.StderrMatch):
"""Tests that when both -M (or -MM) and -MD are specified, glslc should exit
with an error message complaining the case and neither dependency info
output nor compilation output. This test has -MD before -M flag.
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-MD', '-M', 'shader.vert']
expected_stderr = ['glslc: error: both -M (or -MM) and -MD are specified. '
'Only one should be used at one time.\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorSetBothDashCapMDAndDashCapM(expect.StderrMatch):
"""Tests that when both -M (or -MM) and -MD are specified, glslc should exit
with an error message complaining the case and neither dependency info
output nor compilation output. This test has -M before -MD flag.
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-M', '-MD', 'shader.vert']
expected_stderr = ['glslc: error: both -M (or -MM) and -MD are specified. '
'Only one should be used at one time.\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorDashCapMFWithMultipleInputFiles(expect.StderrMatch):
"""Tests that when -MF option is specified, only one input file should be
provided."""
environment = Directory('.', [File('a.vert', MINIMAL_SHADER),
File('b.vert', MINIMAL_SHADER)])
glslc_args = ['-MD', 'a.vert', 'b.vert', '-c', '-MF', 'dep_info']
expected_stderr = ['glslc: error: '
'to specify dependency info file name or dependency '
'info target, only one input file is allowed.\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorDashCapMTWithMultipleInputFiles(expect.StderrMatch):
"""Tests that when -MT option is specified, only one input file should be
provided."""
environment = Directory('.', [File('a.vert', MINIMAL_SHADER),
File('b.vert', MINIMAL_SHADER)])
glslc_args = ['-M', 'a.vert', 'b.vert', '-c', '-MT', 'target']
expected_stderr = ['glslc: error: '
'to specify dependency info file name or dependency '
'info target, only one input file is allowed.\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorDashCapMFMissingDashMAndDashMD(expect.StderrMatch):
"""Tests that when only -MF is specified while -M and -MD are not specified,
    glslc should emit an error complaining that the user must specify either
-M (-MM) or -MD to generate dependency info.
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-MF', 'dep_info', 'shader.vert', '-c']
expected_stderr = ['glslc: error: '
'to generate dependencies you must specify either -M '
'(-MM) or -MD\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorDashCapMTMissingDashMAndMDWith(expect.StderrMatch):
"""Tests that when only -MF and -MT is specified while -M and -MD are not
specified, glslc should emit an error complaining that the user must
    specify either -M (-MM) or -MD to generate dependency info.
"""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['-MF', 'dep_info', '-MT', 'target', 'shader.vert', '-c']
expected_stderr = ['glslc: error: '
'to generate dependencies you must specify either -M '
'(-MM) or -MD\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorMissingDependencyInfoFileName(expect.StderrMatch):
"""Tests that dependency file name is missing when -MF is specified."""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['target', 'shader.vert', '-c', '-MF']
expected_stderr = ['glslc: error: '
'missing dependency info filename after \'-MF\'\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorMissingDependencyTargetName(expect.StderrMatch):
"""Tests that dependency target name is missing when -MT is specified."""
environment = EMPTY_SHADER_IN_CURDIR
glslc_args = ['target', 'shader.vert', '-c', '-MT']
expected_stderr = ['glslc: error: '
'missing dependency info target after \'-MT\'\n']
| bsd-3-clause | -7,187,553,513,730,992,000 | 42.816976 | 82 | 0.602004 | false |
OpenPathView/batchPanoMaker | opv_import/helpers/udev_observer.py | 1 | 1433 | # coding: utf-8
# Copyright (C) 2017 Open Path View, Maison Du Libre
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
# Contributors: Benjamin BERNARD <[email protected]>
# Email: [email protected]
# Description: Simply create an udev observer.
import pyudev
def create_udev_block_observer(event_listener, observer_name: str) -> pyudev.MonitorObserver:
"""
Create an udev block observer.
:param event_listener: Lambda executed when a new block device is detected. Will be executed with and : action: str, device: pyudev.Device
:param observer_name: Name of the observer.
:return: The created observer.
"""
context = pyudev.Context()
monitor = pyudev.Monitor.from_netlink(context)
monitor.filter_by(subsystem='block')
return pyudev.MonitorObserver(monitor, event_listener, name=observer_name) | gpl-3.0 | 8,850,746,572,900,994,000 | 43.8125 | 142 | 0.753664 | false |
superclass/superwas | nagios.py | 1 | 12906 | #!/usr/bin/python
# This file is part of Superwas.
#
# Superwas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Superwas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Superwas. If not, see <http://www.gnu.org/licenses/>.
# Classes to create Nagios statistics from WAS PMI data
#
# Author: Andre van Dijk (SuperClass IT)
# Date: $Date: 2013-01-18 16:38:05 +0100 (vr, 18 jan 2013) $
# $Id: nagios.py 428 2013-01-18 15:38:05Z andre $
import logging
logger = logging.getLogger(__name__)
class NagiosStatus:
def __init__(self, code, message, perfdata):
self.code=code
self.message=message
self.perfdata=perfdata
def getCode(self):
return self.code
def getMessage(self):
return self.message
def getPerformanceData(self):
return self.perfdata
class NagiosStat:
# Nagio Return values
OK=0 # indicates a service is working properly.
WARNING=1 # indicates a service is in warning state.
CRITICAL=2 # indicates a service is in critical state.
UNKNOWN=3 # indicates a service is in unknown state.
STATUS=["OK","WARNING","CRITICAL","UNKOWN"]
def __init__(self):
self.criticalThreshold=0
self.warningThreshold=0
self.statusinput=[]
def setStatus(self, stats):
pass
def setCriticalThreshold(self, critical):
self.criticalThreshold=int(critical)
def setWarningThreshold(self, warning):
self.warningThreshold=int(warning)
class HeapStat(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.current=-1
self.count=-1
def setCurrentHeapSize(self, current):
self.current=int(current)
def setUsedMemory(self, count):
self.count=int(count)
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('HeapSize')
if pu is not None:
self.setCurrentHeapSize(pu.getCurrent())
pu=stat.getStatistic('UsedMemory')
if pu is not None:
self.setUsedMemory(pu.getCount())
def getStatus(self):
percentage=-1
status=self.UNKNOWN
message="HeapStatus unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("Heap stats off, returning OK")
return NagiosStatus(self.OK, "Heap thresholds unset", "")
if self.count!=-1 and self.current!=-1:
if self.count!=0:
percentage=(float(self.count)/self.current)*100
else:
percentage=0
if percentage>=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL heapSize %d/%d" % (percentage,self.criticalThreshold)
elif percentage>=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING heapSize %d/%d" % (percentage,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK heapSize %d/%d" % (percentage,self.warningThreshold)
logger.debug("Heap stats: %s %s" % (status,message))
return NagiosStatus(status, message,"Heap=%d%%;%d;%d;;;" % (percentage,self.warningThreshold,self.criticalThreshold))
class CPUStat(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.percentage=-1
def setCPUPercentage(self, percentage):
self.percentage=int(percentage)
def getStatus(self):
status=NagiosStat.UNKNOWN
message="CPU Usage unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("CPU stats off, returning OK")
return NagiosStatus(self.OK, "CPU thresholds unset", "")
if self.percentage!=-1:
if self.percentage >=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL CPU Usage %d/%d" % (self.percentage,self.criticalThreshold)
elif self.percentage >=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING CPU Usage %d/%d" % (self.percentage,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK CPU Usage %d/%d" % (self.percentage,self.warningThreshold)
return NagiosStatus(status, message, "CPU=%d%%;%d;%d;;;" % (self.percentage,self.warningThreshold,self.criticalThreshold))
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('ProcessCpuUsage')
if pu is not None:
self.setCPUPercentage(pu.getCount())
class DataSourceUsageStat(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.percentUsed=-1
def setPercentUsed(self, percentUsed):
self.percentUsed=float(percentUsed)
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('PercentUsed')
if pu is not None:
self.setPercentUsed(pu.getCurrent())
def getStatus(self):
status=NagiosStat.UNKNOWN
message="DataSource connection pool usage unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("DataSource usage stats off, returning OK")
return NagiosStatus(self.OK, "DataSource usage thresholds unset", "")
if self.percentUsed!=-1:
if self.percentUsed >=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL DataSource pool usage %d/%d" % (self.percentUsed,self.criticalThreshold)
elif self.percentUsed >=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING DataSource pool usage %d/%d" % (self.percentUsed,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK DataSource usage %d/%d" % (self.percentUsed,self.warningThreshold)
return NagiosStatus(status, message, "DataSourceUsage=%d%%;%d;%d;;;" % (self.percentUsed,self.warningThreshold,self.criticalThreshold))
class DataSourceWaitStat(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.waitTime=-1
def setWaitTime(self, waitTime):
self.waitTime=float(waitTime)
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('WaitTime')
if pu is not None:
self.setWaitTime(pu.getMean())
def getStatus(self):
status=NagiosStat.UNKNOWN
message="DataSource connection pool wait time unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("DataSource wait stats off, returning OK")
return NagiosStatus(self.OK, "DataSource wait time thresholds unset", "")
if self.waitTime!=-1:
if self.waitTime >=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL DataSource wait time %d/%d" % (self.waitTime,self.criticalThreshold)
elif self.waitTime >=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING DataSource wait time %d/%d" % (self.waitTime,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK DataSource wait time %d/%d" % (self.waitTime,self.warningThreshold)
return NagiosStatus(status, message, "DataSourceWait=%dms;%d;%d;;;" % (self.waitTime,self.warningThreshold,self.criticalThreshold))
class DataSourceUsetimeStat(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.useTime=-1
def setUseTime(self, useTime):
self.useTime=float(useTime)
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('UseTime')
if pu is not None:
self.setUseTime(pu.getMean())
def getStatus(self):
status=NagiosStat.UNKNOWN
message="DataSource connection pool use time unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("DataSource use time stats off, returning OK")
return NagiosStatus(self.OK, "DataSource use time thresholds unset", "")
if self.useTime!=-1:
if self.useTime >=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL DataSource use time %d/%d" % (self.useTime,self.criticalThreshold)
elif self.useTime >=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING DataSource use time %d/%d" % (self.useTime,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK DataSource use time %d/%d" % (self.useTime,self.warningThreshold)
return NagiosStatus(status, message, "DataSourceUsetime=%dms;%d;%d;;;" % (self.useTime,self.warningThreshold,self.criticalThreshold))
class WebContainerConcurrentHungThreadCount(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.hungThreads=-1
self.maxPoolSize=-1
def setHungThreads(self, hungThreads):
self.hungThreads=int(hungThreads)
def setMaxPoolSize(self, maxpoolsize):
self.maxPoolSize=int(maxpoolsize)
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('ConcurrentHungThreadCount')
if pu is not None:
self.setHungThreads(pu.getCurrent())
pu=stat.getStatistic('PoolSize')
if pu is not None:
self.setMaxPoolSize(pu.getUpperBound())
def getStatus(self):
status=NagiosStat.UNKNOWN
message="Webcontainer hung threads unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("Webcontainer hung threads stats off, returning OK")
return NagiosStatus(self.OK, "WebContainer hung threads thresholds unset", "")
if self.hungThreads!=-1 and self.maxPoolSize!=-1:
if self.maxPoolSize!=0:
percentage=(float(self.hungThreads)/self.maxPoolSize)*100
else:
percentage=0
if percentage >=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL Webcontainer hung threads %d/%d" % (percentage,self.criticalThreshold)
elif percentage >=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING Webcontainer hung threads %d/%d" % (percentage,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK Webcontainer hung threads %d/%d" % (percentage,self.warningThreshold)
return NagiosStatus(status, message, "WebContainerConcurrentHungThreadCount=%d%%;%d;%d;;;" % (self.hungThreads,self.warningThreshold,self.criticalThreshold))
class WebContainerActiveStat(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.active=-1
self.maxPoolSize=-1
def setActive(self, active):
self.active=int(active)
def setMaxPoolSize(self, maxpoolsize):
self.maxPoolSize=int(maxpoolsize)
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('ActiveCount')
if pu is not None:
self.setActive(pu.getCurrent())
pu=stat.getStatistic('PoolSize')
if pu is not None:
self.setMaxPoolSize(pu.getUpperBound())
def getStatus(self):
status=NagiosStat.UNKNOWN
message="Webcontainer usage unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("Webcontainer stats off, returning OK")
return NagiosStatus(self.OK, "WebContainer thresholds unset", "")
if self.active!=-1 and self.maxPoolSize!=-1:
if self.maxPoolSize!=0:
percentage=(float(self.active)/self.maxPoolSize)*100
else:
percentage=0
if percentage >=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL Webcontainer usage %d/%d" % (percentage,self.criticalThreshold)
elif percentage >=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING Webcontainer usage %d/%d" % (percentage,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK Webcontainer usage %d/%d" % (percentage,self.warningThreshold)
return NagiosStatus(status, message, "WebContainerActiveStat=%d%%;%d;%d;;;" % (self.active,self.warningThreshold,self.criticalThreshold))
class LiveSessionStat(NagiosStat):
def __init__(self):
NagiosStat.__init__(self)
self.live=-1
def setLive(self, live):
self.live=int(live)
def setStatus(self, stats):
for stat in stats:
pu=stat.getStatistic('LiveCount')
if pu is not None:
self.setLive(pu.getCurrent())
def getStatus(self):
status=NagiosStat.UNKNOWN
message="Live sessions unknown"
if self.criticalThreshold<0 or self.warningThreshold<0:
logger.debug("Live sessions stats off, returning OK")
return NagiosStatus(self.OK, "Live sesions thresholds unset", "")
if self.live!=-1:
if self.live>=self.criticalThreshold:
status=NagiosStat.CRITICAL
message="CRITICAL Live sessions %d/%d" % (self.live,self.criticalThreshold)
elif self.live >=self.warningThreshold:
status=NagiosStat.WARNING
message="WARNING Live sessions %d/%d" % (self.live,self.warningThreshold)
else:
status=NagiosStat.OK
message="OK Live sessions %d/%d" % (self.live,self.warningThreshold)
return NagiosStatus(status, message, "LiveSession=%d;%d;%d;;;" % (self.live,self.warningThreshold,self.criticalThreshold))
| gpl-2.0 | -9,097,531,912,660,383,000 | 36.086207 | 159 | 0.707423 | false |
wichert/rest_toolkit | tests/ext/test_colander.py | 1 | 1039 | import pytest
from pyramid.httpexceptions import HTTPBadRequest
from rest_toolkit.abc import EditableResource
from rest_toolkit.ext.colander import ColanderSchemaValidationMixin
import colander
class AccountSchema(colander.Schema):
email = colander.SchemaNode(colander.String())
password = colander.SchemaNode(colander.String())
class DummyResource(ColanderSchemaValidationMixin, EditableResource):
schema = AccountSchema
def to_dict(self):
return {}
def update_from_dict(self, data, partial):
pass
def test_valid_request():
resource = DummyResource()
resource.validate({'email': '[email protected]', 'password': 'Jane'}, partial=False)
def test_validation_error():
resource = DummyResource()
with pytest.raises(HTTPBadRequest):
resource.validate({'email': '[email protected]'}, partial=False)
def test_partial_data():
resource = DummyResource()
resource.to_dict = lambda: {'password': 'Jane'}
resource.validate({'email': '[email protected]'}, partial=True)
| bsd-2-clause | 5,855,686,844,315,270,000 | 27.081081 | 87 | 0.725698 | false |
robinbach/adv-loop-perf | 04modelPython/Regression.py | 1 | 4435 | from sklearn import svm
from sklearn import linear_model
from sklearn.kernel_ridge import KernelRidge
import numpy as np
import sys
import random
import matplotlib.pyplot as plt
numTrain = 11
def readFile(fPath):
data = np.genfromtxt(fPath, delimiter=',')
random.shuffle(data)
performance = data.T[-2]
distortion = data.T[-1]
numX = len(data.T) - 2
A = data.T[0:numX]
for i in range(len(A)):
A[i] = A[i] / max(max(A[i]), 1.0)
A = A.T
ATrain = A[0:numTrain]
ATest = A[numTrain + 1:]
performanceTrain = performance[0:numTrain]
performanceTest = performance[numTrain + 1:]
distortionTrain = distortion[0:numTrain]
distortionTest = distortion[numTrain + 1:]
return ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest
def linearRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest):
lr = linear_model.LinearRegression()
lr.fit(ATrain, performanceTrain)
performancePred = lr.predict(ATest)
performanceErr = sum(abs(performancePred - performanceTest)) / len(performanceTest)
print 'linear regression performance error: ', performanceErr
lr.fit(ATrain, distortionTrain)
distortionPred = lr.predict(ATest)
distortionErr = sum(abs(distortionPred - distortionTest)) / len(distortionTest)
print 'linear regression distortion error: ', distortionErr
histoPlot(performancePred, performanceTest)
histoPlot(distortionPred, distortionTest)
def SVR(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest):
clf = svm.SVR(C=100, epsilon=0.001)
clf.fit(ATrain, performanceTrain)
performancePred = clf.predict(ATest)
performanceErr = sum(abs(performancePred - performanceTest)) / len(performanceTest)
print 'SVR performance error: ', performanceErr
clf.fit(ATrain, distortionTrain)
distortionPred = clf.predict(ATest)
distortionErr = sum(abs(distortionPred - distortionTest)) / len(distortionTest)
print 'SVR distortion error: ', distortionErr
histoPlot(performancePred, performanceTest)
histoPlot(distortionPred, distortionTest)
def ridgeRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest):
model = KernelRidge(alpha=0.01, kernel='sigmoid')
model.fit(ATrain, performanceTrain)
performancePred = model.predict(ATest)
performanceErr = sum(abs(performancePred - performanceTest)) / len(performanceTest)
print 'Kernel ridge performance error: ', performanceErr
model.fit(ATrain, distortionTrain)
distortionPred = model.predict(ATest)
distortionErr = sum(abs(distortionPred - distortionTest)) / len(distortionTest)
print 'Kernel ridge distortion error: ', distortionErr
histoPlot(performancePred, performanceTest)
histoPlot(distortionPred, distortionTest)
def robustRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest):
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(ATrain, performanceTrain)
model_ransac.predict(ATest)
temp = model_ransac.predict(ATest)
performancePred = []
for data in temp:
performancePred.append(data[0])
model_ransac.fit(ATrain, distortionTrain)
model_ransac.predict(ATest)
temp = model_ransac.predict(ATest)
distortionPred = []
for data in temp:
distortionPred.append(data[0])
histoPlot(performancePred, performanceTest)
histoPlot(distortionPred, distortionTest)
def histoPlot(pred, actual):
x = np.arange(len(actual))
plt.hold(True)
rects1 = plt.bar(x, pred, 0.2, color='r')
x = x + 0.2
rects2 = plt.bar(x, actual, 0.2)
plt.legend((rects1[0], rects2[0]), ('Prediction', 'Actual'), fontsize=20)
plt.xlabel('Data Point', fontsize=30)
plt.ylabel('Value', fontsize=30)
performanceErr = sum(abs(pred - actual)) / len(actual)
print 'Error: ', performanceErr
plt.title('Mean error: ' + ('%.3f' % performanceErr), fontsize=30)
plt.hold(False)
plt.show()
def main():
dataPath = sys.argv[1]
ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest = readFile(dataPath)
linearRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest)
SVR(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest)
ridgeRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest)
robustRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest)
if __name__ == '__main__':
main()
| mit | 5,522,304,864,581,364,000 | 37.912281 | 104 | 0.773393 | false |
yaelelmatad/EtsyApiTest | findSimilarShopsAllShopsByPopularity.py | 1 | 6336 | from __future__ import division
import json
import sys
import math
import random
#hard coded number of similar stores to spit out since HW said 5, could always add to command line
nSimilarStores = 5
maxBonus = 0.0005
class vectors:
def __init__(self, featureVectorName, multiplier, shopVectors):
self.multiplier= multiplier
#this will then hold 1/count
self.shopVectors= shopVectors
#spare feature vectors that only include features which appear in this particular shop
#eventually get normalized so that |shopVector| = 1
self.featureVectorProperty = featureVectorName
def getMultiplier(self):
'''return the multiplier after training, make sure to train and normalize before calling this function'''
return self.multiplier
def getShopVectors(self):
'''return the shopvectors. make sure to train and normalize before calling this routine'''
return self.shopVectors
def calculateDistance(self, shop1, shop2):
'''given two shop names, calculate the distance for this typeOfVector only'''
#check that both of the vectors are in this class, if not use the default empty dictionary
vec1 = {}
vec2 = {}
if shop1 in self.shopVectors:
vec1 = self.shopVectors[shop1]
if shop2 in self.shopVectors:
vec2 = self.shopVectors[shop2]
#the vectors are sparse, so not all keys appear in all vectors. Figure out which keys are in just one, and which are in both
allKeys = vec1.keys() + vec2.keys()
sharedKeys = []
justInFirst = []
justInSecond = []
for key in set(allKeys):
if key in vec1.keys() and key in vec2.keys():
sharedKeys.append(key)
elif key in vec1.keys():
justInFirst.append(key)
else:
justInSecond.append(key)
dist2 = 0 #actually the squared distance
#since we used all our store data to train our multiplier, we know that the multiplier contains all keys
for key in justInFirst:
dist2 += math.pow(vec1[key],2)*(self.multiplier[key])
#dist2 += math.pow(vec1[key],2)
for key in justInSecond:
dist2 += math.pow(vec2[key],2)*(self.multiplier[key])
#dist2 += math.pow(vec2[key],2)
for key in sharedKeys:
dist2 += math.pow(vec2[key]-vec1[key],2)*(self.multiplier[key])
#dist2 += math.pow(vec2[key]-vec1[key],2)
return math.sqrt(dist2)
def main(jsonInputForMultiplier, jsonInputFileForVectors, jsonShopInfo, outputFileName):
#read the json input
multFile = open(jsonInputForMultiplier,'r')
multipliers =json.load(multFile)
multFile.close()
shopVecFile = open(jsonInputFileForVectors,'r')
shopVectors = json.load(shopVecFile)
shopVecFile.close()
jsonShopFile = open(jsonShopInfo,'r')
shopDetails = json.load(jsonShopFile)
jsonShopFile.close()
#here is where I calculate what "bonus" to give the store if it is very popular
maxPopularity = 1
for shop in shopDetails:
currPop = shopDetails[shop][0]["num_favorers"]
if currPop > maxPopularity:
maxPopularity = currPop
#max seems to be ~170 for my data
#find out how many different things we trained against
typesOfVectors = [key for key in multipliers]
#initialize the vectorClasses with the trained data
vectorClasses = {}
for typeVec in typesOfVectors:
vectorClasses[typeVec] = vectors(typeVec, multipliers[typeVec],shopVectors[typeVec])
#find all the shop names (not necessarily unique)
shopNamesNotSet = []
#so we can get all shops, not all shops appear in all feature sets
for typeVec in typesOfVectors:
shopNamesNotSet += [shop for shop in shopVectors[typeVec]]
#now remove duplicates
shopNames = set(shopNamesNotSet)
outputFile = open(outputFileName, 'wb')
for originalShop in shopNames:
distances = []
accum = 0
for shop in shopNames:
dist = 0
#go through all the shops and calculate the distance
if shop == originalShop:
#don't waste your time calculating self distance
continue
for typeVec in typesOfVectors:
#there are len(typesOfVectors) different "length" vectors to calculate
dist+=vectorClasses[typeVec].calculateDistance(originalShop,shop)
#if shop != originalShop:
accum += dist
#subtract a bit of distance if a store is really popular.
dist+= (-1)*maxBonus*float(shopDetails[shop][0]["num_favorers"])/float(maxPopularity)
distances.append((shop,dist))
#print "average ", float(accum)/float(len(distances))
    #certainly not necessary to keep all the distances and then sort. could just keep the list of "nSimilarStores" currently with lowest distance values, but the sort is quick on only 5000 members
sortedDist = sorted(distances, key=lambda t: t[1])
#sort on second element of tuple
stringToPrint = originalShop+ ": " + sortedDist[0][0]
for i in range(1,nSimilarStores):
stringToPrint += ", " + sortedDist[i][0]
stringToPrint += "\n"
outputFile.write(stringToPrint)
outputFile.close()
def usage():
sys.stderr.write("""
given a multiplier.json and a shopvectors.json goes through ALL the stores and finds the five most similar stores. This version also gives stores that are more popular a bonus. Avg distance 0.3. Stores can reduce the distance to current store by up to 0.05 if they have most favorers of the list. If there are no favorers, there is no distance reduction.
\n Third argument should be output file you want to write to like "similarShops.dat" for example you might use: \n
 python findSimilarShopsAllShopsByPopularity.py multiplier.json vectors.json storeData.json similarShopsByPopularity.dat
\n""")
if __name__ == "__main__":
  #check the usage is correct; the user must supply 4 arguments
if len(sys.argv) != 5:
usage()
sys.exit(1)
main(sys.argv[1],sys.argv[2], sys.argv[3], sys.argv[4])
| gpl-3.0 | -9,196,436,886,653,949,000 | 40.142857 | 360 | 0.652778 | false |
ZeitOnline/zeit.content.article | src/zeit/content/article/edit/browser/tests/test_rawxml.py | 1 | 1493 | import zeit.content.article.edit.browser.testing
class Form(zeit.content.article.edit.browser.testing.BrowserTestCase):
block_type = 'raw'
def test_inline_form_saves_values(self):
self.get_article(with_empty_block=True)
b = self.browser
b.open('editable-body/blockname/@@edit-rawxml?show_form=1')
b.getControl('XML source').value = """\
<raw xmlns:ns0="http://namespaces.zeit.de/CMS/cp" ns0:__name__="blockname">
<foo> </foo>
</raw>
"""
b.getControl('Apply').click()
b.open('@@edit-rawxml?show_form=1')
self.assertEllipsis("""\
<raw...xmlns:ns0="http://namespaces.zeit.de/CMS/cp"...ns0:__name__="blockname"...>
<foo> </foo>
</raw>
""", b.getControl('XML source').value)
def test_xml_is_validated_root_must_be_raw_element(self):
self.get_article(with_empty_block=True)
b = self.browser
b.open('editable-body/blockname/@@edit-rawxml?show_form=1')
b.getControl('XML source').value = '<foo />'
b.getControl('Apply').click()
self.assertIn(
'<span class="error">The root element must be <raw>.</span>',
b.contents)
class FormLoader(zeit.content.article.edit.browser.testing.EditorTestCase):
def test_rawxml_form_is_loaded(self):
s = self.selenium
self.add_article()
self.create_block('raw')
s.assertElementPresent('css=.block.type-raw .inline-form '
'.field.fieldname-xml')
| bsd-3-clause | -7,119,949,674,202,443,000 | 33.72093 | 82 | 0.616879 | false |
yeleman/snisi | snisi_maint/management/commands/update-cluster-from-std-csv.py | 1 | 3120 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
import os
from django.core.management.base import BaseCommand
from optparse import make_option
from py3compat import PY2
from snisi_core.models.Entities import Entity
from snisi_core.models.Projects import Cluster, Participation
if PY2:
import unicodecsv as csv
else:
import csv
logger = logging.getLogger(__name__)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-f',
help='CSV file',
action='store',
dest='filename'),
)
def handle(self, *args, **options):
if not os.path.exists(options.get('filename') or ""):
logger.error("CSV file `{}` does not exist."
.format(options.get('filename')))
return
headers = ['action', 'slug', 'cluster', 'include_hc']
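        # Illustrative row (the slug values here are hypothetical):
        #   add,some-entity-slug,some-cluster-slug,1
        # 'action' is one of add/disable/enable/remove; a non-empty
        # 'include_hc' also applies the action to the entity's health centers.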
input_csv_file = open(options.get('filename'), 'r')
csv_reader = csv.DictReader(input_csv_file, fieldnames=headers)
for entry in csv_reader:
if csv_reader.line_num == 1:
continue
entity = Entity.get_or_none(entry.get('slug'))
if entity is None:
logger.warning("Entity `{}` does not exist."
                               .format(entry.get('slug')))
continue
cluster = Cluster.get_or_none(entry.get('cluster'))
if cluster is None:
logger.error("Cluster `{}` does not exist."
                             .format(entry.get('cluster')))
continue
include_hc = bool(entry.get('include_hc'))
entities = [entity]
if include_hc:
entities += entity.get_health_centers()
if entry.get('action') == 'add':
for e in entities:
p, created = Participation.objects.get_or_create(
cluster=cluster,
entity=e,
is_active=True)
logger.info(p)
if entry.get('action') == 'disable':
for p in Participation.objects.filter(
cluster=cluster,
entity__slug__in=[e.slug for e in entities]):
p.is_active = False
p.save()
logger.info(p)
if entry.get('action') == 'enable':
for p in Participation.objects.filter(
cluster=cluster,
entity__slug__in=[e.slug for e in entities]):
p.is_active = True
p.save()
logger.info(p)
if entry.get('action') == 'remove':
Participation.objects.filter(
cluster=cluster,
entity__slug__in=[e.slug for e in entities]).delete()
logger.info("All Done")
| mit | -835,938,490,400,088,200 | 31.842105 | 73 | 0.501923 | false |
mbartling/TAMU_senior_design | Python/get_xbee_servo.py | 1 | 4195 | #! /usr/bin/env python
import serial
import sys
import os
import MySQLdb
from subprocess import call
from datetime import date
FORCE_WRITE = 0
HORIZONTAL = 0
VERTICAL = 90
today = date.today()
try:
address_array = []
# open data base
db = MySQLdb.connect(host="localhost", user="", passwd="team05", db="xbee_teensy")
cur = db.cursor()
cur.execute("select version()")
data = cur.fetchone()
print "Database version: ", data
cur.execute("truncate table raw_data")
# open serial port
xbee = serial.Serial()
xbee.baudrate = 57600
if len(sys.argv) > 1:
xbee.port = sys.argv[1]
else:
xbee.port = '/dev/ttyACM0'
if xbee.isOpen():
xbee.close()
xbee.open()
print xbee
xbee.write("?")
  if xbee.isOpen():
for line in xbee:
line = line.strip()
packet = line.split()
print line;
if len(packet) > 1 and packet[0] == '7E':
if len(packet) < 26 or int(packet[11], 16) != 0x64:
print "Packet len is: " + "{0}".format(len(packet))
continue;
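        # Packet layout as parsed below (offsets inferred from this script,
        # not from an XBee datasheet): bytes 8-10 and 14 form the address,
        # 15 = RSSI, 16 = servo position, 17-20 = latitude, 21-24 = longitude.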
# calling system command for timestamp
p = os.popen('date "+%F %T"')
timestamp = p.readline()
p.close()
timestamp = timestamp.rstrip('\n')
timestamp = timestamp.rstrip('\0')
print "Time is: " + timestamp
# parse address
addressH = packet[8:11]
addressH.append(packet[14])
# convert to dec, then string
addressString = ''
for item in addressH:
x = int(item, 16)
addressString += str(x) + '.'
addressString = addressString[:-1]
print "Initial Address: " + addressString
# parse rssi
rssi = int(packet[15], 16)
print "RSSI = ", rssi
        # parse servo position
servoPos = int(packet[16], 16)
print "servoPos =", servoPos
# parse gps
latArray = packet[17:21]
latHex = ''.join(latArray)
print latHex
if latHex == '0000':
lat = 0
else:
lat = int(latHex, 16)
lonArray = packet [21:25]
lonHex = ''.join(lonArray)
print lonHex
if lonHex == '0000':
lon = 0;
else:
lon = int(lonHex, 16)
lon = lon ^ 0xFFFFFFFF
lon += 1
lon *= -1
print lat, lon
if FORCE_WRITE:
cmd = "insert into raw_data values(\"%s\",\"%s\", %d, %d, %d, %d)" %(timestamp, addressString, servoPos, rssi, lat, lon)
print cmd
cur.execute(cmd)
db.commit()
print "new row added to mysql"
if not addressString in address_array:
print "Adding address string: " + addressString
address_array.append(addressString)
else:
if lon > -970000000 and lon < -960000000 and lat > 306000000 and lat < 307000000:
cmd = "insert into raw_data values(\"%s\",\"%s\", %d, %d, %d, %d)" %(timestamp, addressString, rssi, servoPos, lat, lon)
print cmd
cur.execute(cmd)
db.commit()
print "new row added to mysql"
if not addressString in address_array:
print "Adding address string: " + addressString
address_array.append(addressString)
print "Closing Xbee Port"
finally:
print "output data to file"
# os.popen('rm -f /home/walter/Code/rawData/*.txt')
# os.popen('rm -f /tmp/raw101.txt')
for address in address_array:
# write horizontal
address_split = address.split('.');
filename = '/tmp/raw' + address_split[3] + 'horiz.txt'
os.popen('rm ' + filename)
print filename
cmd = "select row, col, rssi from raw_data where address = \'%s\' and servoPos = %d into outfile \'%s\' fields terminated by ','" %(address, HORIZONTAL, filename)
print cmd
cur.execute(cmd)
cmd = 'cp ' + filename + ' /home/walter/Code/rawData/raw' + address_split[3] + today.strftime("-%y-%m-%d") + 'horiz.out'
print cmd
os.popen(cmd)
filename = '/tmp/raw' + address_split[3] + 'vert.txt'
os.popen('rm ' + filename)
print filename
cmd = "select row, col, rssi from raw_data where address = \'%s\' and servoPos = %d into outfile \'%s\' fields terminated by ','" %(address, VERTICAL, filename)
print cmd
cur.execute(cmd)
cmd = 'cp ' + filename + ' /home/walter/Code/rawData/raw' + address_split[3] + today.strftime("-%y-%m-%d") + 'vert.out'
print cmd
os.popen(cmd)
print "closing xbee port and database"
db.close()
xbee.close()
| mit | -2,135,066,010,321,820,200 | 26.598684 | 164 | 0.615256 | false |
Syncleus/apex | src/apex/kiss/kiss_serial.py | 1 | 3486 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""KISS Core Classes."""
# These imports are for python3 compatibility inside python2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import serial
import six
from apex.kiss import constants as kiss_constants
from .kiss import Kiss
__author__ = 'Jeffrey Phillips Freeman (WI2ARD)'
__maintainer__ = 'Jeffrey Phillips Freeman (WI2ARD)'
__email__ = '[email protected]'
__license__ = 'Apache License, Version 2.0'
__copyright__ = 'Copyright 2016, Syncleus, Inc. and contributors'
__credits__ = []
class KissSerial(Kiss):
"""KISS Serial Object Class."""
logger = logging.getLogger(__name__)
logger.setLevel(kiss_constants.LOG_LEVEL)
console_handler = logging.StreamHandler()
console_handler.setLevel(kiss_constants.LOG_LEVEL)
formatter = logging.Formatter(kiss_constants.LOG_FORMAT)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
logger.propagate = False
def __init__(self, strip_df_start=True,
com_port=None,
baud=38400,
parity=serial.PARITY_NONE,
stop_bits=serial.STOPBITS_ONE,
byte_size=serial.EIGHTBITS):
super(KissSerial, self).__init__(strip_df_start)
self.com_port = com_port
self.baud = baud
self.parity = parity
self.stop_bits = stop_bits
self.byte_size = byte_size
self.serial = None
self.logger.info('Using interface_mode=Serial')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.serial.close()
def __del__(self):
if self.serial and self.serial.isOpen():
self.serial.close()
def _read_interface(self):
read_data = self.serial.read(kiss_constants.READ_BYTES)
waiting_data = self.serial.inWaiting()
if waiting_data:
read_data += self.serial.read(waiting_data)
return [ord(c) if six.PY2 else c for c in read_data]
def _write_interface(self, data):
self.serial.write(data)
def connect(self, mode_init=None, **kwargs):
"""
Initializes the KISS device and commits configuration.
See http://en.wikipedia.org/wiki/KISS_(TNC)#Command_codes
for configuration names.
:param **kwargs: name/value pairs to use as initial config values.
"""
self.logger.debug('kwargs=%s', kwargs)
self.serial = serial.Serial(port=self.com_port, baudrate=self.baud, parity=self.parity,
stopbits=self.stop_bits, bytesize=self.byte_size)
self.serial.timeout = kiss_constants.SERIAL_TIMEOUT
if mode_init is not None:
self.serial.write(mode_init)
self.exit_kiss = True
else:
self.exit_kiss = False
        # Previous versions defaulted to Xastir-friendly configs. Unfortunately
        # those don't work with Bluetooth TNCs, so we're reverting to None.
if kwargs:
for name, value in kwargs.items():
super(KissSerial, self)._write_setting(name, value)
def close(self):
super(KissSerial, self).close()
if not self.serial:
raise RuntimeError('Attempting to close before the class has been started.')
elif self.serial.isOpen():
self.serial.close()
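# Minimal usage sketch (the serial port name below is hypothetical; KISS frame
# I/O itself is handled by the Kiss base class and is not shown here):
#   tnc = KissSerial(com_port='/dev/ttyUSB0', baud=38400)
#   tnc.connect()
#   ...exchange KISS frames through the Kiss base class API...
#   tnc.close()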
| apache-2.0 | -7,112,238,142,381,846,000 | 30.690909 | 95 | 0.626219 | false |
khertan/gedit_flake8 | gedit_flake8/__init__.py | 1 | 14861 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""gedit-flake8 : A plugin for gedit
to display error and warning from flake8."""
__author__ = "Benoît HERVIER"
__copyright__ = "Copyright 2012 " + __author__
__license__ = "GPLv3"
__version__ = "0.7.0"
__maintainer__ = "Benoît HERVIER"
__email__ = "[email protected]"
__status__ = "Beta"
try:
from gi.repository import GObject, Gtk, Gedit, Pango
except ImportError as err:
    print('GEdit-Flake8 needs to be launched by GEdit 3')
print(err)
import re
from subprocess import Popen, PIPE, call
import threading
GObject.threads_init()
def _remove_tags(document, errors_tag):
"""Remove not anymore used tags"""
if errors_tag:
start, end = document.get_bounds()
document.remove_tag(errors_tag, start, end)
def apply_style(style, tag):
"""Apply a style to a tag from the default theme style
    This lightly modified code comes from the synctext.py gedit plugin"""
def apply_style_prop(tag, style, prop):
if style.get_property(prop + "-set"):
tag.set_property(prop, style.get_property(prop))
else:
tag.set_property(prop, None)
def apply_style_prop_bool(tag, style, prop, whentrue, whenfalse):
if style.get_property(prop + "-set"):
prop_value = whentrue if style.get_property(prop) else whenfalse
tag.set_property(prop, prop_value)
apply_style_prop(tag, style, "foreground")
apply_style_prop(tag, style, "background")
try:
apply_style_prop_bool(tag,
style,
"weight",
Pango.Weight.BOLD,
Pango.Weight.NORMAL)
except TypeError as err:
# Different version of gtk 3 have different properties ... :(
print(err)
apply_style_prop_bool(tag,
style,
"italic",
Pango.Style.ITALIC,
Pango.Style.NORMAL)
apply_style_prop_bool(tag,
style,
"underline",
Pango.Underline.SINGLE,
Pango.Underline.NONE)
apply_style_prop(tag, style, "strikethrough")
class _IdleObject(GObject.Object):
"""
Override gobject.GObject to always emit signals in the main thread
by emmitting on an idle handler
"""
def __init__(self):
GObject.Object.__init__(self)
def emit(self, *args):
GObject.idle_add(GObject.Object.emit, self, *args)
class Message(object):
def __init__(self, document, lineno, column, message):
self._doc = document
self._lineno = lineno
self._column = column
self._message = message
self._start_iter = None
self._end_iter = None
self._stock_id = self._get_stock_id(message)
def _get_stock_id(self, message):
if message.startswith('E'):
return Gtk.STOCK_DIALOG_ERROR
elif message.startswith('W'):
return Gtk.STOCK_DIALOG_WARNING
elif message.startswith('C'):
return Gtk.STOCK_DIALOG_INFO
else:
return Gtk.STOCK_DIALOG_INFO
def setWordBounds(self, start, end):
self._start_iter = start
self._end_iter = end
    doc = property(lambda self: self._doc)
lineno = property(lambda self: self._lineno)
    column = property(lambda self: self._column)
message = property(lambda self: self._message)
start = property(lambda self: self._start_iter)
end = property(lambda self: self._end_iter)
stock_id = property(lambda self: self._stock_id)
class ResultsModel(Gtk.ListStore):
def __init__(self):
super(ResultsModel, self).__init__(int, int, str)
def add(self, msg):
self.append([msg.lineno, msg.column, msg.message])
class ResultsView(Gtk.TreeView):
def __init__(self, panel):
super(ResultsView, self).__init__()
self._panel = panel
linha = Gtk.TreeViewColumn("Line")
linha_cell = Gtk.CellRendererText()
linha.pack_start(linha_cell, True)
linha.add_attribute(linha_cell, 'text', 0)
linha.set_sort_column_id(0)
self.append_column(linha)
msgtype = Gtk.TreeViewColumn("Column")
msgtype_cell = Gtk.CellRendererText()
msgtype.pack_start(msgtype_cell, True)
msgtype.add_attribute(msgtype_cell, 'text', 1)
msgtype.set_sort_column_id(1)
self.append_column(msgtype)
msg = Gtk.TreeViewColumn("Message")
msg_cell = Gtk.CellRendererText()
msg.pack_start(msg_cell, True)
msg.add_attribute(msg_cell, 'text', 2)
msg.set_sort_column_id(2)
self.append_column(msg)
self.connect("row-activated", self._row_activated_cb)
def _row_activated_cb(self, view, row, column):
model = view.get_model()
iter = model.get_iter(row)
window = self._panel.get_window()
document = window.get_active_document()
line = model.get_value(iter, 0) - 1
document.goto_line(line)
view = window.get_active_view()
text_iter = document.get_iter_at_line(line)
view.scroll_to_iter(text_iter, 0.25, False, 0.5, 0.5)
view.grab_focus()
class ResultsPanel(Gtk.ScrolledWindow):
def __init__(self, window):
super(ResultsPanel, self).__init__()
self.window = window
self.view = ResultsView(self)
self.add(self.view)
self.view.show()
def set_model(self, model):
self.view.set_model(model)
def get_window(self):
return self.window
class Worker(threading.Thread, _IdleObject):
__gsignals__ = {
"completed": (
GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, []), }
def __init__(self, document, errors_tag):
self.document = document
threading.Thread.__init__(self)
_IdleObject.__init__(self)
if errors_tag is None:
self._add_tags(document)
else:
self._errors_tag = errors_tag
self._results = []
self._errors = []
self.cancelled = False
def _add_tags(self, document):
"""Register new tags in the sourcebuffer"""
style = document.get_style_scheme().get_style('def:error')
self._errors_tag = \
document.create_tag("flake8-error",
underline=Pango.Underline.ERROR)
apply_style(style, self._errors_tag)
def _highlight_errors(self, errors):
"""Colorize error in the sourcebuffer"""
document = self.document
for err in errors:
start = document.get_iter_at_line(err.lineno - 1)
end = document.get_iter_at_line(err.lineno - 1)
end.forward_to_line_end()
# apply tag to entire line
document.apply_tag(self._errors_tag, start, end)
def _flake8_bin(self):
"""Returns a flake8 valid executable
flake8 is the default executable, but in Debian systems,
for example, package pyflakes provides a pyflakes binary
instead of flake8
"""
# list of flake binaries
flake8_binaries = ('flake8', 'pyflakes')
def cmd_exists(cmd):
return call("type " + cmd,
shell=True,
stdout=PIPE, stderr=PIPE) == 0
for flake8 in flake8_binaries:
if cmd_exists(flake8):
return flake8
# default
return "flake8"
def run(self):
errors = []
location = self.document.get_location()
_remove_tags(self.document, self._errors_tag)
if location is None:
print('Location not found ...')
return
path = location.get_path()
if path is None:
import codecs
try:
encoding = self.document.get_encoding().get_charset()
except Exception as err:
encoding = 'utf-8'
path = '/tmp/gedit_flake8.py'
start, end = self.document.get_bounds()
with codecs.open(path, 'w', encoding=encoding) as fh:
fh.write(str(
self.document.get_text(start, end,
include_hidden_chars=True),
encoding))
stdout, stderr = Popen([self._flake8_bin(), path],
stdout=PIPE, stderr=PIPE).communicate()
output = stdout if stdout else stderr
line_format = re.compile(
'(?P<path>[^:]+):(?P<line>\d+):'
+ '(?P<character>\d+:)?\s(?P<message>.*$)')
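        # Example line this pattern matches (typical flake8/pyflakes output):
        #   /tmp/gedit_flake8.py:12:5: E225 missing whitespace around operator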
self._results = ResultsModel()
if not output:
if not self.cancelled:
self.emit("completed")
return
for line in output.splitlines():
m = line_format.match(line.decode('utf-8'))
if not m:
continue
groups = m.groupdict()
if groups['character']:
err = Message(self.document,
int(groups['line']),
int(groups['character'].strip(':')),
groups['message'],)
else:
err = Message(self.document,
int(groups['line']),
0,
groups['message'],)
errors.append(err)
self._results.add(err)
_remove_tags(self.document, self._errors_tag)
self._errors = errors
self._highlight_errors(self._errors)
if not self.cancelled:
self.emit("completed")
class Flake8Plugin(GObject.Object, Gedit.WindowActivatable):
__gtype_name__ = "Flake8"
window = GObject.property(type=Gedit.Window)
documents = []
_errors_tag = {}
_results = {}
_errors = {}
_worker = None
def __init__(self):
GObject.Object.__init__(self)
def do_activate(self):
# self._insert_panel()
self._panel = ResultsPanel(self.window)
self._panel.show()
bottom = self.window.get_bottom_panel()
bottom.add_titled(self._panel, "ResultsPanel", "Flake8 Results")
self.window.connect("tab-added", self.on_tab_added)
self.window.connect("tab-removed", self.on_tab_removed)
self.window.connect("active-tab-changed", self.on_active_tab_changed)
def do_deactivate(self):
# self._remove_panel()
pass
def on_notify_style_scheme(self, document, param_object):
style = document.get_style_scheme().get_style('def:error')
apply_style(style, self._errors_tag[document])
def _insert_panel(self):
"""Insert bottom GEdit panel"""
self._panel = ResultsPanel(self.window)
image = Gtk.Image()
image.set_from_icon_name('gnome-mime-text-x-python',
Gtk.IconSize.MENU)
bottom_panel = self.window.get_bottom_panel()
bottom_panel.add_item(self._panel,
'ResultsPanel',
'Flake8 Results',
image)
def display_error_msg(self, document):
"""Display a statusbar message if the current line have errors"""
if document is None:
return True
try:
if document.get_language().get_name() != 'Python':
return True
except AttributeError as err:
return True
curline = document.get_iter_at_mark(
document.get_insert()).get_line() + 1
for err in self._errors[document]:
if err.lineno == curline:
statusbar = self.window.get_statusbar()
statusbar_ctxtid = statusbar.get_context_id('Flake8')
statusbar.push(statusbar_ctxtid, 'Line : %s : %s'
% (err.lineno, err.message))
return True
return False
def _remove_panel(self):
"""Remove the inserted panel from GEdit"""
bottom_panel = self.window.get_bottom_panel()
bottom_panel.remove_item(self._panel)
def on_active_tab_changed(self, window, tab):
self._panel.set_model(self._results[tab.get_document()])
def on_tab_added(self, window, tab):
"""Initialize the required vars"""
document = tab.get_document()
self._results[document] = ResultsModel()
self._errors[document] = []
self._errors_tag[document] = None
document.connect('loaded', self.analyse)
document.connect('saved', self.analyse)
document.connect('cursor-moved', self.display_error_msg)
def on_tab_removed(self, window, tab):
"""Cleaning results not needed anymore"""
document = tab.get_document()
if document in self._results:
self._results[document] = None
del self._results[document]
self._errors[document] = None
del self._errors[document]
_remove_tags(document, self._errors_tag[document])
def completedCb(self, *userData):
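        """Collect results from the finished worker and refresh the UI."""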
errors = self._worker._errors
document = self._worker.document
self._errors[document] = errors
self._results[document] = self._worker._results
self._errors_tag[document] = self._worker._errors_tag
if len(errors) > 0:
if not self.display_error_msg(document):
statusbar = self.window.get_statusbar()
statusbar_ctxtid = statusbar.get_context_id('Flake8')
statusbar.push(statusbar_ctxtid,
'Line : %s : %s'
% (errors[0].lineno, errors[0].message))
else:
statusbar = self.window.get_statusbar()
statusbar_ctxtid = statusbar.get_context_id('Flake8')
statusbar.push(statusbar_ctxtid,
"No errors found")
try:
self._panel.set_model(self._results[document])
        except Exception:
            pass
self._worker = None
def analyse(self, doc):
"""Launch a process and populate vars"""
document = self.window.get_active_document()
if document is None:
return True
try:
if document.get_language().get_name() != 'Python':
return True
except AttributeError:
return True
if self._worker is not None:
self._worker.cancelled = True
self._worker = Worker(document, self._errors_tag[document])
self._worker.connect("completed", self.completedCb)
self._worker.start()
| gpl-3.0 | 2,962,702,410,115,638,000 | 29.95625 | 77 | 0.553671 | false |
schocco/mds-web | mds_website/settings.py | 1 | 8687 | # -*- coding: utf-8 -*-
# Django settings for mds_website project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_DIR = os.path.dirname(__file__)
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'mdsdb', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'postgres',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
#dont force trailing backslash
#APPEND_SLASH = False
#TASTYPIE_ALLOW_MISSING_SLASH = APPEND_SLASH
TASTYPIE_DEFAULT_FORMATS = ['json']
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.abspath(os.path.join(PROJECT_DIR, "media"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.abspath(os.path.join(PROJECT_DIR, "../sitestatic"))
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.abspath(os.path.join(PROJECT_DIR, "../mds-web-client/dist")),
)
STATICFILES_STORAGE = 'webpack.storage.WebpackHashStorage'
WEBPACK_ASSETS_FILE = os.path.abspath(os.path.join(PROJECT_DIR, "../mds-web-client/webpack-assets.json"))
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'use your own secret key.'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
# google api console: https://console.developers.google.com/project/api-access-tests/apiui/credential?authuser=0
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookOAuth2',
'social.backends.google.GoogleOAuth2',
'social.backends.twitter.TwitterOAuth',
'social.backends.vk.VKOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
#SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['https://www.googleapis.com/auth/profile', 'https://www.googleapis.com/auth/email']
LOGIN_REDIRECT_URL = '/'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'apps.mds_auth.middleware.SocialAuthExceptionHandlerMiddleware'
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.user.create_user',
'apps.mds_auth.auth_pipeline.save_profile', # get profile data from oauth resource
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details',
'apps.mds_auth.auth_pipeline.device_redirect', # change ?next parameter to provide access token for mobile apps
)
ROOT_URLCONF = 'mds_website.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mds_website.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'templates'),
)
############# CELERY SETTINGS
## Using the database to store task state and results.
CELERY_RESULT_BACKEND = 'amqp'
BROKER_HOST = "localhost"
#BROKER_URL = 'amqp://guest:guest@localhost:5672/celeryvhost'
CELERY_TIMEZONE = TIME_ZONE
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.gis',
'social.apps.django_app.default',
'tastypie',
'apps.muni_scales',
'apps.trails',
'apps.mds_auth',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'custom': {
'handlers': ['console', 'mail_admins'],
'level': 'DEBUG',
}
}
}
# import local settings file if one exists
# apparently using environment variables is the better solution
try:
from settings_local import *
except Exception:
print("Could not find a local settings file.")
| mit | -8,026,578,457,532,090,000 | 33.200787 | 127 | 0.687234 | false |
micjabbour/AndroidGuard-WebApp | AndroidGuard/models.py | 1 | 3073 | from . import db
from .config import AppConfig
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from sqlalchemy import desc
from itsdangerous import Serializer, BadSignature
class Location(db.Model):
id = db.Column(db.Integer, primary_key=True)
latitude = db.Column(db.DECIMAL(9,6), nullable=False)
longitude = db.Column(db.DECIMAL(9,6), nullable=False)
timestamp = db.Column(db.DateTime, default=datetime.utcnow)
device_id = db.Column(db.Integer, db.ForeignKey('device.id'), nullable=False)
def serialize(self):
return {'latitude': str(self.latitude),
'longitude': str(self.longitude),
'timestamp': self.timestamp.isoformat()+'Z' # HACK
}
class Device(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
fcm_token = db.Column(db.Text)
locations = db.relationship('Location', backref='device', lazy='select')
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    __table_args__ = (db.UniqueConstraint('name', 'user_id'),)
@property
def last_location(self):
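        """Return the most recently stored Location for this device, or None."""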
return Location.query.filter_by(device_id=self.id).order_by(desc('location.id')).first()
def get_device_dict(self):
device_dict = {'id': self.id, 'name': self.name}
if self.last_location:
device_dict['last_location'] = self.last_location.serialize()
return device_dict
def generate_auth_token(self):
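        """Return a signed token that encodes this device's id."""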
s = Serializer(AppConfig.SECRET_KEY)
return s.dumps(self.id)
@staticmethod
def verify_auth_token(token):
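        """Return the Device encoded in the token, or None if the signature is invalid."""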
s = Serializer(AppConfig.SECRET_KEY)
try:
id = s.loads(token)
except BadSignature:
return None
device = Device.query.get(id)
return device
@staticmethod
def get_by_devicename(user, name):
device_list = user.devices
for device in device_list:
if device.name == name:
return device
return None
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.Text, unique=True)
password_hash = db.Column(db.Text)
devices = db.relationship('Device', backref='user', lazy='dynamic')
@property
def password(self):
raise AttributeError('password: write-only field')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
@staticmethod
def get_by_username(username):
return User.query.filter_by(username=username).first()
@staticmethod
def verify_credentials(username, password):
user = User.get_by_username(username)
if user is not None and user.check_password(password):
return user
return None
def __repr__(self):
return "<User '{}'>".format(self.username)
| unlicense | 256,992,587,173,349,120 | 31.691489 | 96 | 0.649528 | false |
noironetworks/neutron | neutron/db/rbac_db_mixin.py | 1 | 6467 | # Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import exceptions as c_exc
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.db import api as db_api
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions as n_exc
from oslo_db import exception as db_exc
from neutron.db import common_db_mixin
from neutron.extensions import rbac as ext_rbac
from neutron.objects import base as base_obj
from neutron.objects import rbac as rbac_obj
class RbacPluginMixin(common_db_mixin.CommonDbMixin):
"""Plugin mixin that implements the RBAC DB operations."""
object_type_cache = {}
supported_extension_aliases = ['rbac-policies']
@db_api.retry_if_session_inactive()
def create_rbac_policy(self, context, rbac_policy):
e = rbac_policy['rbac_policy']
try:
registry.notify(resources.RBAC_POLICY, events.BEFORE_CREATE, self,
context=context, object_type=e['object_type'],
policy=e)
except c_exc.CallbackFailure as e:
raise n_exc.InvalidInput(error_message=e)
rbac_class = (
rbac_obj.RBACBaseObject.get_type_class_map()[e['object_type']])
try:
rbac_args = {'project_id': e['project_id'],
'object_id': e['object_id'],
'action': e['action'],
'target_tenant': e['target_tenant']}
_rbac_obj = rbac_class(context, **rbac_args)
_rbac_obj.create()
except db_exc.DBDuplicateEntry:
raise ext_rbac.DuplicateRbacPolicy()
return self._make_rbac_policy_dict(_rbac_obj)
@staticmethod
def _make_rbac_policy_dict(entry, fields=None):
res = {f: entry[f] for f in ('id', 'project_id', 'target_tenant',
'action', 'object_id')}
res['object_type'] = entry.db_model.object_type
return db_utils.resource_fields(res, fields)
@db_api.retry_if_session_inactive()
def update_rbac_policy(self, context, id, rbac_policy):
pol = rbac_policy['rbac_policy']
entry = self._get_rbac_policy(context, id)
object_type = entry.db_model.object_type
try:
registry.notify(resources.RBAC_POLICY, events.BEFORE_UPDATE, self,
context=context, policy=entry,
object_type=object_type, policy_update=pol)
except c_exc.CallbackFailure as ex:
raise ext_rbac.RbacPolicyInUse(object_id=entry.object_id,
details=ex)
entry.update_fields(pol)
entry.update()
return self._make_rbac_policy_dict(entry)
@db_api.retry_if_session_inactive()
def delete_rbac_policy(self, context, id):
entry = self._get_rbac_policy(context, id)
object_type = entry.db_model.object_type
try:
registry.notify(resources.RBAC_POLICY, events.BEFORE_DELETE, self,
context=context, object_type=object_type,
policy=entry)
except c_exc.CallbackFailure as ex:
raise ext_rbac.RbacPolicyInUse(object_id=entry.object_id,
details=ex)
# make a dict copy because deleting the entry will nullify its
# object_id link to network
entry_dict = entry.to_dict()
entry.delete()
registry.notify(resources.RBAC_POLICY, events.AFTER_DELETE, self,
context=context, object_type=object_type,
policy=entry_dict)
self.object_type_cache.pop(id, None)
def _get_rbac_policy(self, context, id):
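        """Return the RBAC policy object with the given id, whatever its object type."""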
object_type = self._get_object_type(context, id)
rbac_class = rbac_obj.RBACBaseObject.get_type_class_map()[object_type]
_rbac_obj = rbac_class.get_object(context, id=id)
if not _rbac_obj:
raise ext_rbac.RbacPolicyNotFound(id=id, object_type=object_type)
return _rbac_obj
@db_api.retry_if_session_inactive()
def get_rbac_policy(self, context, id, fields=None):
return self._make_rbac_policy_dict(
self._get_rbac_policy(context, id), fields=fields)
@db_api.retry_if_session_inactive()
def get_rbac_policies(self, context, filters=None, fields=None,
sorts=None, limit=None, page_reverse=False):
pager = base_obj.Pager(sorts, limit, page_reverse)
filters = filters or {}
object_types = filters.pop('object_type', None)
rbac_classes_to_query = [
o for t, o in rbac_obj.RBACBaseObject.get_type_class_map().items()
if not object_types or t in object_types]
rbac_objs = []
for rbac_class in rbac_classes_to_query:
rbac_objs += rbac_class.get_objects(context, _pager=pager,
**filters)
return [self._make_rbac_policy_dict(_rbac_obj, fields)
for _rbac_obj in rbac_objs]
def _get_object_type(self, context, entry_id):
"""Scans all RBAC tables for an ID to figure out the type.
This will be an expensive operation as the number of RBAC tables grows.
The result is cached since object types cannot be updated for a policy.
"""
if entry_id in self.object_type_cache:
return self.object_type_cache[entry_id]
for otype, rbac_class in \
rbac_obj.RBACBaseObject.get_type_class_map().items():
if rbac_class.count(context, id=entry_id):
self.object_type_cache[entry_id] = otype
return otype
raise ext_rbac.RbacPolicyNotFound(id=entry_id, object_type='unknown')
| apache-2.0 | 5,662,070,589,634,627,000 | 43.909722 | 79 | 0.613422 | false |
graik/biskit | archive_biskit2/Biskit/deprecated/ChainSeparator.py | 1 | 19962 | ## numpy-oldnumeric calls replaced by custom script; 09/06/2016
## Automatically adapted for numpy-oldnumeric Mar 26, 2007 by alter_code1.py
## class ChainSeperator:
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
"""
Separate PDB into continuous peptide chains for XPlor. Remove duplicate
peptide chains. Required by pdb2xplor.py
This is vintage code. See L{Biskit.PDBCleaner} for a more recent
version (though yet lacking some functions).
@todo: Create an override for the chain comparison if one wants
to keep identical chains (i.e homodimers)
"""
## from Blast2Seq import * # compare 2 sequences
from molUtils import singleAA
import Biskit.tools as T
from LogFile import LogFile
from Scientific.IO.PDB import *
import Biskit.oldnumeric as N0
import string
from difflib import SequenceMatcher
import re
class ChainSeparator:
"""
Open PDB file; give back one chain whenever next() is
called. This class is used by the pdb2xplor script.
This class constitutes vintage code. See
L{Biskit.PDBCleaner} and L{Biskit.Mod.TemplateCleaner} for a more
recent implementation of PDB cleaning.
@todo: The removal of duplicate chains should be transferred to
the PDBCleaner so that this class can be retired
"""
def __init__(self, fname, outPath='', chainIdOffset=0,
capBreaks=0, chainMask=0, log=None ):
"""
@param fname: pdb filename
@type fname: str
@param outPath: path for log file
@type outPath: str
@param chainIdOffset: start chain numbering at this offset
@type chainIdOffset: int
@param capBreaks: add ACE and NME to N- and C-term. of chain breaks [0]
@type capBreaks: 0|1
@param chainMask: chain mask for overriding the default sequence identity [None]
@type chainMask: [1|0]
@param log: LogFile object
@type log: object
"""
self.pdb = Structure(fname);
self.fname = fname
self.outPath = T.absfile( outPath )
self.chainIdOffset = chainIdOffset
self.capBreaks = capBreaks
self.log = LogFile( T.absfile(outPath)+'/' + self.pdbname()+'.log')
if log:
self.log = log
self.chains = self.pdb.peptide_chains
self.counter = -1
self.threshold = 0.9 # sequence identity between multiple copies in PDB
self._expressionCheck(
"[^\n].*[Hh][Oo][Mm][Oo].?[Dd][Ii][Mm][eE][Rr].*\n", 'HOMODIMER')
self._expressionCheck("[^\n].*[Tt][Rr][Ii][Mm][Ee][Rr].*\n", 'TRIMER')
self._hetatomCheck()
self.log.add("Separate chains: \n------------------")
self._removeDuplicateChains(chainMask) # keep only one copy of molecule
self._separateChainBreaks()
self._assign_seg_ids() # new segment id for each chain
def pdbname(self):
"""
Extract pdb code from file name.
@return: (assumed) pdb code
@rtype: str
"""
return T.stripFilename(self.pdb.filename)
def _expressionCheck(self, findExpression, findClean):
"""
Check and report if the regular expression 'findExpression'
exists in the PDB-file. Use this to locate data in the REMARK
section of a pdb file. Prints a warning to stdOut if the
regular expression is found.
@param findExpression: regular expression
@type findExpression: str
@param findClean: clean name of regular expression
@type findClean: str
"""
pdb = open(self.fname,'r')
pdbFile = pdb.read()
searchResult = re.findall(findExpression,pdbFile)
warningMessage = """
        WARNING! The text string '%s' was found in the PDB-file.
        If this PDB-file contains a homodimer, one of the chains will be
        deleted by this script. To avoid this, prepare the file for Xplor manually.\n""" %\
( findClean )
warningMessage2 = """--------------------------------------------\n"""
if len(searchResult) != 0:
self.log.add(warningMessage)
self.log.add("String found in line(s): \n")
for i in range(0,len(searchResult)):
self.log.add(searchResult[i])
self.log.add(warningMessage2)
pdb.close()
def _hetatomCheck(self):
"""
Check and report if there are any none-water HETATMs in the PDB-file
"""
pdb = open(self.fname,'r')
pdbFile = pdb.read()
findExpression = "HETATM.*\n"
searchResult = re.findall(findExpression,pdbFile)
i=0
j = len(searchResult)
while i<j:
if searchResult[i][17:20] == "HOH" or \
searchResult[i][0:6] != "HETATM" :
del searchResult[i]
i=i-1
j=j-1
i=i+1
warningMessage = """
        WARNING! The PDB-file contains coordinates for non-water HETATMs.
        If you want to keep the HETATMs, prepare the file for Xplor manually.\n"""
warningMessage2 = "\n"+ 80*"-" + "\n"
if len(searchResult) != 0:
self.log.add(warningMessage)
self.log.add("String found in line(s): \n")
for i in range(0,len(searchResult)):
self.log.add(searchResult[i][0:-1])
self.log.add(warningMessage2)
pdb.close()
def _compareSequences( self, seq1, seq2 ):
"""
@param seq1: sequence 1 to compare
@type seq1: str
@param seq2: sequence 1 to compare
@type seq2: str
@return: identity (0.0 - 1.0) between the two sequences
@rtype : float
"""
# compare the 2 sequences
## blast = Blast2Seq( seq1, seq2 )
## id = blast.run()
matcher = SequenceMatcher( None, ''.join(seq1) , ''.join(seq2) )
return matcher.ratio()
def _removeDuplicateChains(self, chainMask=None):
"""
Get rid of identical chains by comparing all chains with Blast2seq.
@param chainMask: chain mask for overriding the
chain identity checking (default: None)
@type chainMask: [int]
@return: number of chains removed
@rtype: int
"""
chainCount = len(self.chains)
matrix = 1.0 * N0.zeros((chainCount,chainCount))
chain_ids = []
## create identity matrix for all chains against all chains
for i in range(0, chainCount):
chain_ids = chain_ids + [self.chains[i].chain_id] # collect for log file
for j in range(i, len(self.chains)):
# convert 3-letter-code res list into 1-letter-code String
seq1 = singleAA( self.chains[i].sequence() )
seq2 = singleAA( self.chains[j].sequence() )
## if len(seq1) > len(seq2): # take shorter sequence
## # aln len at least half the len of the shortest sequence
## alnCutoff = len(seq2) * 0.5
## else:
## alnCutoff = len(seq1) * 0.5
## if id['aln_len'] > alnCutoff:
## matrix[i,j] = id['aln_id']
## else: # aln length too short, ignore
## matrix[i,j] = 0
matrix[i,j] = self._compareSequences( seq1, seq2 )
## report activity
self.log.add("\n Chain ID's of compared chains: "+str(chain_ids))
self.log.add(" Cross-Identity between chains:\n"+str(matrix))
self.log.add(" Identity threshold used: "+str(self.threshold))
## override the automatic chain deletion by supplying a
## chain mask to this function
if chainMask:
if len(chainMask) == chainCount:
self.chains = N0.compress(chainMask, self.chains)
self.log.add("NOTE: chain mask %s used for removing chains.\n"%chainMask)
else:
self.log.add("########## ERROR ###############")
self.log.add("# Chain mask is only %i chains long"%len(chainMask))
self.log.add("# when a mask of length %i is needed"%chainCount)
self.log.add("# No cleaning will be performed.\n")
if not chainMask:
## look at diagonals in "identity matrix"
## (each chain against each)
duplicate = len(self.chains)
for offset in range(1,chainCount):
diag = N0.diagonal(matrix, offset ,0,1)
# diagonal of 1's mark begin of duplicate
avg = 1.0 * N0.sum(diag)/len(diag)
if (avg >= self.threshold):
duplicate = offset
break
self.chains = self.chains[:duplicate]
self.log.add("NOTE: Identity matrix will be used for removing identical chains.")
        ## report activity
self.log.add(str(chainCount - len(self.chains))+\
" chains have been removed.\n")
# how many chains have been removed?
return (chainCount - len(self.chains))
def _assign_seg_ids(self):
"""
Assign new segment id to each chain.
"""
counter = self.chainIdOffset
for chain in self.chains:
## Assemble segid from pdb code + one letter out of A to Z
chain.segment_id = self.pdbname()[:3] + string.uppercase[counter]
counter = counter + 1
try: # report changed segement ids
chain_id = chain.chain_id
self.log.add("changed segment ID of chain "+chain_id+\
" to "+chain.segment_id)
except:
T.errWriteln("_assign_seg_ids(): logerror")
def _sequentialDist(self, chain, cutoff, atom):
"""
Calculate sequential atom-atom distance, report residues with
longer distance than cutoff (chain break positions).
@param chain: Scientific.IO.PDB.PeptideChain object
@type chain: object
@param cutoff: threshold for reporting gap (chain break)
@type cutoff: float
@param atom: type of atoms to check (i.e. 'CA')
@type atom: str
@return: list of chain break positions (residue index for each
first residue of two that are too distant)
@rtype: list of int
"""
distanceList = []
v0 = Vector( 0,0,0 )
jump = 1
for res in range(0,len(chain)-2):
try:
v1 = Vector(chain[res][atom].position.array)
## ignore CA with 0,0,0 coordinate
if v1 != v0:
jump = 1
v2 = Vector(chain[ res+jump ][atom].position.array)
## look for next CA with non-zero coordinate
while v2 == v0 and jump + res < len( chain ):
jump += 1
v2 = Vector(chain[ res+jump ][atom].position.array)
if (v1 - v2).length() > cutoff * jump:
distanceList = distanceList + [res + jump - 1]
except:
self.log.add(
"_sequentialDist():\nError while checking CA-CA distance"+\
" between residues "+str(chain[res].name)+\
str(chain[res].number)+" and "+\
str(chain[res+jump].name)+\
str(chain[res+jump].number)+ " in chain "+chain.chain_id)
self.log.add("Error: " + T.lastError() )
return distanceList
## def _sequentialDist(self, chain, cutoff, atom):
## """
## Calculate sequential atom-atom distance, report residues with
## longer distance than cutoff (chain break positions).
## chain - PDB.PeptideChain
## cutoff - float, threshold for reporting gap (chain break)
## atom - str, type of atoms to check (i.e. 'CA')
## -> [int, int, ...], list of chain break positions (residue index
## for each first residue of two that are too distant)
## """
## distanceList = []
## for residue in range(0,len(chain)-1):
## # iterate through residue 1 to ter-1
## try:
## vectorAtom1 = Vector(chain[residue][atom].position.array)
## vectorAtom2 = Vector(chain[residue+1][atom].position.array)
## if (vectorAtom1 - vectorAtom2).length() > cutoff:
## distanceList = distanceList + [residue]
## except:
## self.log.add(
## "_sequentialDist():\nError while checking CA-CA distance"+ \
## " between residues "+str(chain[residue].name)+\
## str(chain[residue].number)+" and "+str(chain[residue+1].name)+\
## str(chain[residue+1].number)+ " in chain "+chain.chain_id)
## self.log.add("Error: " + T.lastError() )
## return distanceList
def _separateChainBreaks(self):
"""
Separate chains with breaks into 2 chains.
The new chain(s) is/are added to the internal PDB instance
(self.chains).
"""
fragments = []
for chain in self.chains:
# res number of residues before a break
breaks = self._sequentialDist(chain, 4.5, 'CA')
self.log.add(str(len(breaks)) + " breaks found in chain " +\
"(" + str(len(chain)) \
+ " residues) " + chain.chain_id + ": "+str(breaks))
previous = 0
ncap_next = 0
for breakRes in breaks:
residues = chain.residues[previous:breakRes+1]
previous = breakRes + 1
chainNew = PeptideChain(residues, chain.chain_id,
chain.segment_id)
if ncap_next:
self.__nCap( chainNew )
ncap_next = 0
if self.capBreaks:
## add N-Methyl to c terminal
self.__cCap( chainNew )
ncap_next = 1
fragments = fragments + [chainNew]
chainNew = PeptideChain(chain.residues[previous:], chain.chain_id,
chain.segment_id)
if ncap_next:
self.__nCap( chainNew )
fragments = fragments + [chainNew]
self.chains = fragments
def __nCap( self, pep_chain ):
"""
Add acetyl capping to N-terminal of peptide chain
"""
n = (pep_chain[0].number or 1) - 1
r = AminoAcidResidue('ACE', number=n, atoms=[Atom('CA', Vector(0,0,0),
element='C')])
pep_chain.residues = [r] + pep_chain.residues
self.log.add('Capping chain break with ACE %i' % n)
def __cCap( self, pep_chain ):
"""
Add methyle amine capping to C-terminal of peptide chain
"""
n = (pep_chain[-1].number or len(pep_chain)) + 1
r = AminoAcidResidue('NME', number=n, atoms=[Atom('CA', Vector(0,0,0),
element='C')])
pep_chain.residues = pep_chain.residues + [r]
self.log.add('Capping chain break at with NME %i' % n)
def extractWaters(self):
"""
Write waters into separate pdb file, called |pdbCode|_waters.pdb.
"""
try:
fTarget = self.outPath + '/' +\
self.pdbname()[:4] + '_waters.pdb'
pdb = PDBFile( fTarget, mode='w' )
waters = []
for key in ['HOH', 'DOD']:
if self.pdb.molecules.has_key( key ):
waters += self.pdb.molecules[ key ]
pdb.nextChain(chain_id='', segment_id='1XWW')
for w in waters:
pdb.nextResidue('TIP3')
## XPLOR wants "ATOM" not "HETATM":
pdb.het_flag = 0
pdb.writeAtom('OH2', w.atoms['O'].position)
## keep TIP3 waters as well
if len(waters) == 0:
try:
TIP3_waters = self.pdb.molecules[ 'TIP3' ]
except:
TIP3_waters = []
for w in TIP3_waters:
pdb.nextResidue('TIP3')
## XPLOR wants "ATOM" not "HETATM":
pdb.het_flag = 0
pdb.writeAtom('OH2', w.atoms['OH2'].position)
pdb.writeAtom('H1', w.atoms['H1'].position)
pdb.writeAtom('H2', w.atoms['H2'].position)
pdb.close()
except:
T.errWriteln("Error writing waters to %s: " % fTarget )
T.errWriteln( T.lastError() )
def next(self):
"""
Return next 'clean', non-redundant, non-broken chain from PDB
@return: Scientific.IO.PDB.PeptideChain, completed chain OR
if no chain is left
@rtype: chain object OR None
"""
self.counter = self.counter + 1
if (len(self.chains) > self.counter):
return self.chains[self.counter]
else:
return None
#############
## TESTING
#############
import Biskit.test as BT
class Test(BT.BiskitTest):
"""Test ChainSeparator """
def prepare(self):
self.fname = T.testRoot() + '/com/1BGS_original.pdb'
self.outPath = T.tempDir()
def cleanUp(self):
T.tryRemove( self.sep.log.fname )
def test_ChainSeparator( self ):
"""ChainSeparator test"""
self.sep = ChainSeparator( self.fname, self.outPath, 1)
self.chain = self.sep.next()
i=1
all_chains = []
while self.chain <> None:
if self.local:
print 'Chain %i:'%i, ''.join(singleAA(self.chain.sequence() ) )
all_chains += self.chain.sequence()
self.chain = self.sep.next()
i += 1
if self.local:
print 'ChainSeparator log file written to: %s'%self.sep.log.fname
r = ''.join( singleAA( all_chains ) )
self.assertEqual(r, self.EXPECTED)
EXPECTED='AQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGRTWREADINYTSGFRNSDRILYSSDWLIYKTTDHYQTFTKIRAQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGRTWREADINYTSGFRNSDRILYSSDWLIYKTTDHYQTFTKIRAQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGRTWREADINYTSGFRNSDRILYSSDWLIYKTTDHYQTFTKIRKKAVINGEQIRSISDLHQTLKKELALPEYYGENLDALWDALTGWVEYPLVLEWRQFEQSKQLTENGAESVLQVFREAKAEGADITIILSKKAVINGEQIRSISDLHQTLKKELALPEYYGENLDALWDALTGWVEYPLVLEWRQFEQSKQLTENGAESVLQVFREAKAEGADITIILSKKAVINGEQIRSISDLHQTLKKELALPEYYGENLDALWDALTGWVEYPLVLEWRQFEQSKQLTENGAESVLQVFREAKAEGADITIILS'
if __name__ == '__main__':
BT.localTest()
| gpl-3.0 | -7,084,247,818,275,736,000 | 35.694853 | 612 | 0.551598 | false |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/docstring.py | 1 | 2522 | from __future__ import print_function
import string
import inspect
class FormatDict(dict):
"""Adapted from http://stackoverflow.com/questions/11283961/partial-string-formatting"""
def __missing__(self, key):
return "{" + key + "}"
class DocReplacer(object):
"""Decorator object for replacing patterns in docstrings using string.format."""
def __init__(self, auto_dedent=True, allow_partial_formatting=False, **doc_dict):
'''
Parameters
-------------
        auto_dedent : bool
            Flag for automatically dedenting the docstring (inspect.cleandoc) before formatting.
        allow_partial_formatting : bool
            Enables partial formatting (i.e., not all keys need to be available in the dictionary)
        doc_dict : kwargs
            Each key names a pattern in the docstring that will be replaced by its corresponding value.
Example
-------------
TODO: Update this documentation
        @DocReplacer(p1='p1 : int\n\tFirst parameter')
def foo(p1):
"""
Some functions.
Params:
{p1}
"""
will result in foo's docstring being:
"""
Some functions.
Params:
p1 : int
First parameter
"""
'''
self.doc_dict = doc_dict
self.auto_dedent = auto_dedent
self.allow_partial_formatting = allow_partial_formatting
def __call__(self, func):
if func.__doc__:
doc = func.__doc__
if self.auto_dedent:
doc = inspect.cleandoc(doc)
func.__doc__ = self._format(doc)
return func
def replace(self):
"""Reformat values inside the self.doc_dict using self.doc_dict
TODO: Make support for partial_formatting
"""
doc_dict = self.doc_dict.copy()
for k, v in doc_dict.items():
            if '{' in v and '}' in v:
self.doc_dict[k] = v.format(**doc_dict)
def update(self, *args, **kwargs):
"Assume self.params is a dict and update it with supplied args"
self.doc_dict.update(*args, **kwargs)
def _format(self, doc):
""" Formats the docstring using self.doc_dict """
if self.allow_partial_formatting:
mapping = FormatDict(self.doc_dict)
else:
mapping = self.doc_dict
formatter = string.Formatter()
return formatter.vformat(doc, (), mapping)
| mit | 8,740,734,155,669,062,000 | 30.135802 | 102 | 0.560666 | false |
simodalla/django-custom-email-user | setup.py | 1 | 1526 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import custom_email_user
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = custom_email_user.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='django-custom-email-user',
version=version,
description="""Cus""",
long_description=readme + '\n\n' + history,
author='Simone Dalla',
author_email='[email protected]',
url='https://github.com/simodalla/django-custom-email-user',
packages=[
'custom_email_user',
],
include_package_data=True,
install_requires=[
],
license="BSD",
zip_safe=False,
keywords='django-custom-email-user',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| bsd-3-clause | -4,183,705,017,668,472,000 | 26.745455 | 66 | 0.610092 | false |
greggian/TapdIn | django/contrib/gis/tests/relatedapp/tests.py | 1 | 15132 | import os, unittest
from django.contrib.gis.geos import *
from django.contrib.gis.db.backend import SpatialBackend
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.tests.utils import no_mysql, no_oracle, no_spatialite
from django.conf import settings
from models import City, Location, DirectoryEntry, Parcel, Book, Author
cities = (('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
class RelatedGeoModelTest(unittest.TestCase):
def test01_setup(self):
"Setting up for related model tests."
for name, state, lon, lat in cities:
loc = Location.objects.create(point=Point(lon, lat))
c = City.objects.create(name=name, state=state, location=loc)
@no_oracle # TODO: Fix select_related() problems w/Oracle and pagination.
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.all()
qs2 = City.objects.select_related()
qs3 = City.objects.select_related('location')
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@no_mysql
@no_oracle # Pagination problem is implicated in this test as well.
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
# Doing this implicitly sets `select_related` select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@no_mysql
@no_spatialite
def test04a_related_extent_aggregate(self):
"Testing the `extent` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes Roswell.
all_extent = (-104.528060913086, 33.0583305358887,-79.4607315063477, 40.1847610473633)
txpa_extent = (-97.51611328125, 33.0583305358887,-79.4607315063477, 40.1847610473633)
e1 = City.objects.extent(field_name='location__point')
e2 = City.objects.exclude(name='Roswell').extent(field_name='location__point')
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol)
@no_mysql
def test04b_related_union_aggregate(self):
"Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
# union that is returned.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
# Creating the reference union geometry depending on the spatial backend,
# as Oracle will have a different internal ordering of the component
# geometries than PostGIS. The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.unionagg()`).
if SpatialBackend.oracle:
ref_u1 = MultiPoint(p3, p1, p2, srid=4326)
ref_u2 = MultiPoint(p3, p2, srid=4326)
else:
ref_u1 = MultiPoint(p1, p2, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.unionagg(field_name='location__point')
u2 = City.objects.exclude(name='Roswell').unionagg(field_name='location__point')
u3 = aggs['location__point__union']
self.assertEqual(ref_u1, u1)
self.assertEqual(ref_u2, u2)
self.assertEqual(ref_u1, u3)
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
l = list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326)
pcity = City.objects.get(name='Aurora')
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# the same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if not SpatialBackend.mysql:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if not SpatialBackend.mysql:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
# GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively.
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Incrementing through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.failUnless(isinstance(d['point'], SpatialBackend.Geometry))
self.failUnless(isinstance(t[1], SpatialBackend.Geometry))
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# Adding two more cities, but this time making sure that their location
# ID values do not match their City ID values.
loc1 = Location.objects.create(point='POINT (-95.363151 29.763374)')
loc2 = Location.objects.create(point='POINT (-96.801611 32.782057)')
dallas = City.objects.create(name='Dallas', state='TX', location=loc2)
houston = City.objects.create(name='Houston', state='TX', location=loc1)
# The expected ID values -- notice the last two location IDs
# are out of order. We want to make sure that the related
# location ID column is selected instead of ID column for
# the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.failUnless('Aurora' in names)
self.failUnless('Kecksburg' in names)
def test11_geoquery_pickle(self):
"Ensuring GeoQuery objects are unpickled correctly. See #10839."
import pickle
from django.contrib.gis.db.models.sql import GeoQuery
qs = City.objects.all()
q_str = pickle.dumps(qs.query)
q = pickle.loads(q_str)
self.assertEqual(GeoQuery, q.__class__)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
# Creating a new City, 'Fort Worth', that uses the same location
# as Dallas.
dallas = City.objects.get(name='Dallas')
ftworth = City.objects.create(name='Fort Worth', state='TX', location=dallas.location)
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Creating some data for the Book/Author non-geo models that
# use GeoManager. See #11087.
tp = Author.objects.create(name='Trevor Paglen')
Book.objects.create(title='Torture Taxi', author=tp)
Book.objects.create(title='I Could Tell You But Then You Would Have to be Destroyed by Me', author=tp)
Book.objects.create(title='Blank Spots on the Map', author=tp)
wp = Author.objects.create(name='William Patry')
Book.objects.create(title='Patry on Copyright', author=wp)
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books. Also testing
# with a `GeoValuesQuerySet` (see #11489).
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
no_author = Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertEqual(None, b.author)
@no_mysql
@no_oracle
@no_spatialite
def test14_collect(self):
"Testing the `collect` GeoQuerySet method and `Collect` aggregate."
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = fromstr('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')
c1 = City.objects.filter(state='TX').collect(field_name='location__point')
c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
for coll in (c1, c2):
# Even though Dallas and Ft. Worth share same point, Collect doesn't
# consolidate -- that's why 4 points in MultiPoint.
self.assertEqual(4, len(coll))
self.assertEqual(ref_geom, coll)
# TODO: Related tests for KML, GML, and distance lookups.
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(RelatedGeoModelTest))
return s
| apache-2.0 | -1,316,497,183,138,970,000 | 48.272425 | 155 | 0.629923 | false |
ceph/ceph-deploy | docs/source/conf.py | 1 | 8511 | # -*- coding: utf-8 -*-
#
# ceph-deploy documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 21 09:32:42 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_themes'))
sys.path.insert(0, os.path.abspath('../..'))
import ceph_deploy
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'ceph-deploy'
copyright = u'2013, Inktank'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ceph_deploy.__version__
# The full version, including alpha/beta/rc tags.
release = ceph_deploy.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'ceph'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = False
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ceph-deploydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ceph-deploy.tex', u'ceph-deploy Documentation',
u'Inktank', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ceph-deploy', u'ceph-deploy Documentation',
[u'Inktank'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ceph-deploy', u'ceph-deploy Documentation',
u'Inktank', 'ceph-deploy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# XXX Uncomment when we are ready to link to ceph docs
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
| mit | -1,458,653,863,186,704,100 | 30.757463 | 79 | 0.70873 | false |
fbradyirl/home-assistant | homeassistant/components/zha/core/channels/__init__.py | 1 | 12998 | """
Channels module for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import asyncio
from concurrent.futures import TimeoutError as Timeout
from enum import Enum
from functools import wraps
import logging
from random import uniform
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from ..const import (
CHANNEL_ATTRIBUTE,
CHANNEL_EVENT_RELAY,
CHANNEL_ZDO,
REPORT_CONFIG_DEFAULT,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_RPT_CHANGE,
SIGNAL_ATTR_UPDATED,
)
from ..helpers import LogMixin, get_attr_id_by_name, safe_read
from ..registries import CLUSTER_REPORT_CONFIGS
_LOGGER = logging.getLogger(__name__)
def parse_and_log_command(channel, tsn, command_id, args):
"""Parse and log a zigbee cluster command."""
cmd = channel.cluster.server_commands.get(command_id, [command_id])[0]
channel.debug(
"received '%s' command with %s args on cluster_id '%s' tsn '%s'",
cmd,
args,
channel.cluster.cluster_id,
tsn,
)
return cmd
def decorate_command(channel, command):
"""Wrap a cluster command to make it safe."""
@wraps(command)
async def wrapper(*args, **kwds):
from zigpy.exceptions import DeliveryError
try:
result = await command(*args, **kwds)
channel.debug(
"executed command: %s %s %s %s",
command.__name__,
"{}: {}".format("with args", args),
"{}: {}".format("with kwargs", kwds),
"{}: {}".format("and result", result),
)
return result
except (DeliveryError, Timeout) as ex:
channel.debug("command failed: %s exception: %s", command.__name__, str(ex))
return ex
return wrapper
class ChannelStatus(Enum):
"""Status of a channel."""
CREATED = 1
CONFIGURED = 2
INITIALIZED = 3
class ZigbeeChannel(LogMixin):
"""Base channel for a Zigbee cluster."""
CHANNEL_NAME = None
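    # REPORT_CONFIG is a sequence of dicts of the form
    # {"attr": <attribute>, "config": (min_interval, max_interval, reportable_change)},
    # as consumed by configure_reporting(); see AttributeListeningChannel below for a concrete example.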
REPORT_CONFIG = ()
def __init__(self, cluster, device):
"""Initialize ZigbeeChannel."""
self._channel_name = cluster.ep_attribute
if self.CHANNEL_NAME:
self._channel_name = self.CHANNEL_NAME
self._generic_id = "channel_0x{:04x}".format(cluster.cluster_id)
self._cluster = cluster
self._zha_device = device
self._unique_id = "{}:{}:0x{:04x}".format(
str(device.ieee), cluster.endpoint.endpoint_id, cluster.cluster_id
)
# this keeps logs consistent with zigpy logging
self._log_id = "0x{:04x}:{}:0x{:04x}".format(
device.nwk, cluster.endpoint.endpoint_id, cluster.cluster_id
)
self._report_config = CLUSTER_REPORT_CONFIGS.get(
self._cluster.cluster_id, self.REPORT_CONFIG
)
self._status = ChannelStatus.CREATED
self._cluster.add_listener(self)
@property
def generic_id(self):
"""Return the generic id for this channel."""
return self._generic_id
@property
def unique_id(self):
"""Return the unique id for this channel."""
return self._unique_id
@property
def cluster(self):
"""Return the zigpy cluster for this channel."""
return self._cluster
@property
def device(self):
"""Return the device this channel is linked to."""
return self._zha_device
@property
def name(self) -> str:
"""Return friendly name."""
return self._channel_name
@property
def status(self):
"""Return the status of the channel."""
return self._status
def set_report_config(self, report_config):
"""Set the reporting configuration."""
self._report_config = report_config
async def bind(self):
"""Bind a zigbee cluster.
This also swallows DeliveryError exceptions that are thrown when
devices are unreachable.
"""
from zigpy.exceptions import DeliveryError
try:
res = await self.cluster.bind()
self.debug("bound '%s' cluster: %s", self.cluster.ep_attribute, res[0])
except (DeliveryError, Timeout) as ex:
self.debug(
"Failed to bind '%s' cluster: %s", self.cluster.ep_attribute, str(ex)
)
async def configure_reporting(
self,
attr,
report_config=(
REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
),
):
"""Configure attribute reporting for a cluster.
This also swallows DeliveryError exceptions that are thrown when
devices are unreachable.
"""
from zigpy.exceptions import DeliveryError
attr_name = self.cluster.attributes.get(attr, [attr])[0]
kwargs = {}
if self.cluster.cluster_id >= 0xFC00 and self.device.manufacturer_code:
kwargs["manufacturer"] = self.device.manufacturer_code
min_report_int, max_report_int, reportable_change = report_config
try:
res = await self.cluster.configure_reporting(
attr, min_report_int, max_report_int, reportable_change, **kwargs
)
self.debug(
"reporting '%s' attr on '%s' cluster: %d/%d/%d: Result: '%s'",
attr_name,
self.cluster.ep_attribute,
min_report_int,
max_report_int,
reportable_change,
res,
)
except (DeliveryError, Timeout) as ex:
self.debug(
"failed to set reporting for '%s' attr on '%s' cluster: %s",
attr_name,
self.cluster.ep_attribute,
str(ex),
)
async def async_configure(self):
"""Set cluster binding and attribute reporting."""
# Xiaomi devices don't need this and it disrupts pairing
if self._zha_device.manufacturer != "LUMI":
await self.bind()
if self.cluster.cluster_id not in self.cluster.endpoint.out_clusters:
for report_config in self._report_config:
await self.configure_reporting(
report_config["attr"], report_config["config"]
)
await asyncio.sleep(uniform(0.1, 0.5))
self.debug("finished channel configuration")
self._status = ChannelStatus.CONFIGURED
async def async_initialize(self, from_cache):
"""Initialize channel."""
self.debug("initializing channel: from_cache: %s", from_cache)
self._status = ChannelStatus.INITIALIZED
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle commands received to this cluster."""
pass
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
pass
@callback
def zdo_command(self, *args, **kwargs):
"""Handle ZDO commands on this cluster."""
pass
@callback
def zha_send_event(self, cluster, command, args):
"""Relay events to hass."""
self._zha_device.hass.bus.async_fire(
"zha_event",
{
"unique_id": self._unique_id,
"device_ieee": str(self._zha_device.ieee),
"command": command,
"args": args,
},
)
async def async_update(self):
"""Retrieve latest state from cluster."""
pass
async def get_attribute_value(self, attribute, from_cache=True):
"""Get the value for an attribute."""
manufacturer = None
manufacturer_code = self._zha_device.manufacturer_code
if self.cluster.cluster_id >= 0xFC00 and manufacturer_code:
manufacturer = manufacturer_code
result = await safe_read(
self._cluster,
[attribute],
allow_cache=from_cache,
only_cache=from_cache,
manufacturer=manufacturer,
)
return result.get(attribute)
def log(self, level, msg, *args):
"""Log a message."""
msg = "[%s]: " + msg
args = (self._log_id,) + args
_LOGGER.log(level, msg, *args)
def __getattr__(self, name):
"""Get attribute or a decorated cluster command."""
if hasattr(self._cluster, name) and callable(getattr(self._cluster, name)):
command = getattr(self._cluster, name)
command.__name__ = name
return decorate_command(self, command)
return self.__getattribute__(name)
class AttributeListeningChannel(ZigbeeChannel):
"""Channel for attribute reports from the cluster."""
CHANNEL_NAME = CHANNEL_ATTRIBUTE
REPORT_CONFIG = [{"attr": 0, "config": REPORT_CONFIG_DEFAULT}]
def __init__(self, cluster, device):
"""Initialize AttributeListeningChannel."""
super().__init__(cluster, device)
attr = self._report_config[0].get("attr")
if isinstance(attr, str):
self.value_attribute = get_attr_id_by_name(self.cluster, attr)
else:
self.value_attribute = attr
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
if attrid == self.value_attribute:
async_dispatcher_send(
self._zha_device.hass,
"{}_{}".format(self.unique_id, SIGNAL_ATTR_UPDATED),
value,
)
async def async_initialize(self, from_cache):
"""Initialize listener."""
await self.get_attribute_value(
self._report_config[0].get("attr"), from_cache=from_cache
)
await super().async_initialize(from_cache)
class ZDOChannel(LogMixin):
"""Channel for ZDO events."""
def __init__(self, cluster, device):
"""Initialize ZDOChannel."""
self.name = CHANNEL_ZDO
self._cluster = cluster
self._zha_device = device
self._status = ChannelStatus.CREATED
self._unique_id = "{}:{}_ZDO".format(str(device.ieee), device.name)
self._cluster.add_listener(self)
@property
def unique_id(self):
"""Return the unique id for this channel."""
return self._unique_id
@property
def cluster(self):
"""Return the aigpy cluster for this channel."""
return self._cluster
@property
def status(self):
"""Return the status of the channel."""
return self._status
@callback
def device_announce(self, zigpy_device):
"""Device announce handler."""
pass
@callback
def permit_duration(self, duration):
"""Permit handler."""
pass
async def async_initialize(self, from_cache):
"""Initialize channel."""
entry = self._zha_device.gateway.zha_storage.async_get_or_create(
self._zha_device
)
self.debug("entry loaded from storage: %s", entry)
self._status = ChannelStatus.INITIALIZED
async def async_configure(self):
"""Configure channel."""
self._status = ChannelStatus.CONFIGURED
def log(self, level, msg, *args):
"""Log a message."""
msg = "[%s:ZDO](%s): " + msg
args = (self._zha_device.nwk, self._zha_device.model) + args
_LOGGER.log(level, msg, *args)
class EventRelayChannel(ZigbeeChannel):
"""Event relay that can be attached to zigbee clusters."""
CHANNEL_NAME = CHANNEL_EVENT_RELAY
@callback
def attribute_updated(self, attrid, value):
"""Handle an attribute updated on this cluster."""
self.zha_send_event(
self._cluster,
SIGNAL_ATTR_UPDATED,
{
"attribute_id": attrid,
"attribute_name": self._cluster.attributes.get(attrid, ["Unknown"])[0],
"value": value,
},
)
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle a cluster command received on this cluster."""
if (
self._cluster.server_commands is not None
and self._cluster.server_commands.get(command_id) is not None
):
self.zha_send_event(
self._cluster, self._cluster.server_commands.get(command_id)[0], args
)
# pylint: disable=wrong-import-position
from . import closures # noqa
from . import general # noqa
from . import homeautomation # noqa
from . import hvac # noqa
from . import lighting # noqa
from . import lightlink # noqa
from . import manufacturerspecific # noqa
from . import measurement # noqa
from . import protocol # noqa
from . import security # noqa
from . import smartenergy # noqa
| apache-2.0 | 7,062,243,123,169,791,000 | 30.548544 | 88 | 0.585859 | false |
opencobra/cobrapy | src/cobra/test/test_sampling/test_optgp.py | 1 | 1936 | """Test functionalities of OptGPSampler."""
from typing import TYPE_CHECKING
import numpy as np
import pytest
from cobra.sampling import OptGPSampler
if TYPE_CHECKING:
from cobra import Model
from cobra.sampling import ACHRSampler
@pytest.fixture(scope="function")
def optgp(model: "Model") -> OptGPSampler:
"""Return OptGPSampler instance for tests."""
sampler = OptGPSampler(model, processes=1, thinning=1)
assert (sampler.n_warmup > 0) and (sampler.n_warmup <= 2 * len(model.variables))
assert all(sampler.validate(sampler.warmup) == "v")
return sampler
def test_optgp_init_benchmark(model: "Model", benchmark) -> None:
"""Benchmark inital OptGP sampling."""
benchmark(lambda: OptGPSampler(model, processes=2))
def test_optgp_sample_benchmark(optgp: OptGPSampler, benchmark) -> None:
"""Benchmark OptGP sampling."""
benchmark(optgp.sample, 1)
def test_sampling(optgp: OptGPSampler) -> None:
"""Test sampling."""
s = optgp.sample(10)
assert all(optgp.validate(s) == "v")
def test_batch_sampling(optgp: OptGPSampler) -> None:
"""Test batch sampling."""
for b in optgp.batch(5, 4):
assert all(optgp.validate(b) == "v")
def test_variables_samples(achr: "ACHRSampler", optgp: OptGPSampler) -> None:
"""Test variable samples."""
vnames = np.array([v.name for v in achr.model.variables])
s = optgp.sample(10, fluxes=False)
assert s.shape == (10, optgp.warmup.shape[1])
assert (s.columns == vnames).all()
assert (optgp.validate(s) == "v").all()
def test_reproject(optgp: OptGPSampler) -> None:
"""Test reprojection of sampling."""
s = optgp.sample(10, fluxes=False).values
proj = np.apply_along_axis(optgp._reproject, 1, s)
assert all(optgp.validate(proj) == "v")
s = np.random.rand(10, optgp.warmup.shape[1])
proj = np.apply_along_axis(optgp._reproject, 1, s)
assert all(optgp.validate(proj) == "v")
| gpl-2.0 | 5,998,137,731,282,568,000 | 28.784615 | 84 | 0.673037 | false |
JustFixNYC/who-owns-what | wow/tests/conftest.py | 1 | 1024 | import pytest
import psycopg2
import dbtool
@pytest.fixture(scope='session')
def django_db_setup(django_db_setup, django_db_blocker):
from django.conf import settings
wow = settings.DATABASES['wow']
with django_db_blocker.unblock():
db = dbtool.DbContext(
host=wow['HOST'],
database=wow['NAME'],
user=wow['USER'],
password=wow['PASSWORD'],
port=wow['PORT'] or 5432,
)
# If we're run with --reuse-db, the database might already
# be scaffolded for us, in which case we don't need to
# do anything.
is_already_built = False
conn = db.connection()
with conn:
with conn.cursor() as cursor:
try:
cursor.execute('select * from wow_bldgs limit 1;')
is_already_built = True
except psycopg2.errors.UndefinedTable:
pass
if not is_already_built:
dbtool.loadtestdata(db)
| gpl-3.0 | -1,612,363,369,483,214,000 | 29.117647 | 70 | 0.553711 | false |
jemdwood/cs234_proj | pretrain-atari.py | 1 | 11760 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: train-atari.py
# Original Author (we/[email protected] editted): Yuxin Wu <[email protected]>
import numpy as np
import os
import sys
import time
import random
import uuid
import argparse
import multiprocessing
import threading
import cv2
import tensorflow as tf
import six
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.serialize import *
from tensorpack.utils.stats import *
from tensorpack.tfutils import symbolic_functions as symbf
from tensorpack.tfutils.gradproc import MapGradient, SummaryGradient
from tensorpack.RL import *
from simulator import *
import common
from common import (play_model, Evaluator, eval_model_multithread,
play_one_episode, play_n_episodes)
from records_dataflow import RecordsDataFlow
from kurin_dataflow import KurinDataFlow
if six.PY3:
from concurrent import futures
CancelledError = futures.CancelledError
else:
CancelledError = Exception
IMAGE_SIZE = (84, 84)
FRAME_HISTORY = 4
GAMMA = 0.99
CHANNEL = FRAME_HISTORY * 3
IMAGE_SHAPE3 = IMAGE_SIZE + (CHANNEL,)
LOCAL_TIME_MAX = 5
EVAL_EPISODE = 50
BATCH_SIZE = 128
PREDICT_BATCH_SIZE = 15 # batch for efficient forward
SIMULATOR_PROC = 50
PREDICTOR_THREAD_PER_GPU = 3
PREDICTOR_THREAD = None
EVALUATE_PROC = min(multiprocessing.cpu_count() // 2, 20)
NUM_ACTIONS = None
ENV_NAME = None
BASE_PATH = '/data_4/rl'
def get_player(viz=False, train=False, dumpdir=None):
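    # Wrap the gym env: frames are resized to IMAGE_SIZE, FRAME_HISTORY frames are
    # stacked, and the player is either capped in episode length (training) or
    # guarded against getting stuck (evaluation).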
pl = GymEnv(ENV_NAME, viz=viz, dumpdir=dumpdir)
pl = MapPlayerState(pl, lambda img: cv2.resize(img, IMAGE_SIZE[::-1]))
global NUM_ACTIONS
NUM_ACTIONS = pl.get_action_space().num_actions()
pl = HistoryFramePlayer(pl, FRAME_HISTORY)
if not train:
pl = PreventStuckPlayer(pl, 30, 1)
else:
pl = LimitLengthPlayer(pl, 40000)
return pl
class MySimulatorWorker(SimulatorProcess):
def _build_player(self):
return get_player(train=True)
class Model(ModelDesc):
def _get_inputs(self):
assert NUM_ACTIONS is not None
return [InputDesc(tf.uint8, (None,) + IMAGE_SHAPE3, 'state'),
InputDesc(tf.int64, (None,), 'action'),
InputDesc(tf.float32, (None,), 'futurereward')]
def _get_NN_prediction(self, image):
image = tf.cast(image, tf.float32) / 255.0
with argscope(Conv2D, nl=tf.nn.relu):
l = Conv2D('conv0', image, out_channel=32, kernel_shape=5)
l = MaxPooling('pool0', l, 2)
l = Conv2D('conv1', l, out_channel=32, kernel_shape=5)
l = MaxPooling('pool1', l, 2)
l = Conv2D('conv2', l, out_channel=64, kernel_shape=4)
l = MaxPooling('pool2', l, 2)
l = Conv2D('conv3', l, out_channel=64, kernel_shape=3)
l = FullyConnected('fc0', l, 512, nl=tf.identity)
l = PReLU('prelu', l)
logits = FullyConnected('fc-pi', l, out_dim=NUM_ACTIONS, nl=tf.identity) # unnormalized policy
value = FullyConnected('fc-v', l, 1, nl=tf.identity)
return logits, value
def _build_graph(self, inputs):
state, action, futurereward = inputs
logits, self.value = self._get_NN_prediction(state)
self.value = tf.squeeze(self.value, [1], name='pred_value') # (B,)
self.policy = tf.nn.softmax(logits, name='policy')
expf = tf.get_variable('explore_factor', shape=[],
initializer=tf.constant_initializer(1), trainable=False)
policy_explore = tf.nn.softmax(logits * expf, name='policy_explore')
is_training = get_current_tower_context().is_training
if not is_training:
return
log_probs = tf.log(self.policy + 1e-6)
log_pi_a_given_s = tf.reduce_sum(
log_probs * tf.one_hot(action, NUM_ACTIONS), 1)
advantage = tf.subtract(tf.stop_gradient(self.value), futurereward, name='advantage')
policy_loss = tf.reduce_sum(log_pi_a_given_s * advantage, name='policy_loss')
xentropy_loss = tf.reduce_sum(
self.policy * log_probs, name='xentropy_loss')
value_loss = tf.nn.l2_loss(self.value - futurereward, name='value_loss')
pred_reward = tf.reduce_mean(self.value, name='predict_reward')
advantage = symbf.rms(advantage, name='rms_advantage')
entropy_beta = tf.get_variable('entropy_beta', shape=[],
initializer=tf.constant_initializer(0.01), trainable=False)
self.cost = tf.add_n([policy_loss, xentropy_loss * entropy_beta, value_loss])
self.cost = tf.truediv(self.cost,
tf.cast(tf.shape(futurereward)[0], tf.float32),
name='cost')
summary.add_moving_summary(policy_loss, xentropy_loss,
value_loss, pred_reward, advantage, self.cost)
def _get_optimizer(self):
lr = symbf.get_scalar_var('learning_rate', 0.001, summary=True)
opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
gradprocs = [MapGradient(lambda grad: tf.clip_by_average_norm(grad, 0.1)),
SummaryGradient()]
opt = optimizer.apply_grad_processors(opt, gradprocs)
return opt
class MySimulatorMaster(SimulatorMaster, Callback):
def __init__(self, pipe_c2s, pipe_s2c, model):
super(MySimulatorMaster, self).__init__(pipe_c2s, pipe_s2c)
self.M = model
self.queue = queue.Queue(maxsize=BATCH_SIZE * 8 * 2)
def _setup_graph(self):
self.async_predictor = MultiThreadAsyncPredictor(
self.trainer.get_predictors(['state'], ['policy_explore', 'pred_value'],
PREDICTOR_THREAD), batch_size=PREDICT_BATCH_SIZE)
def _before_train(self):
self.async_predictor.start()
def _on_state(self, state, ident):
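        # Queue the state for batched async inference; the callback samples an
        # action from the predicted policy distribution, stores the transition
        # and replies to the simulator client.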
def cb(outputs):
try:
distrib, value = outputs.result()
except CancelledError:
logger.info("Client {} cancelled.".format(ident))
return
assert np.all(np.isfinite(distrib)), distrib
action = np.random.choice(len(distrib), p=distrib)
client = self.clients[ident]
client.memory.append(TransitionExperience(state, action, None, value=value))
self.send_queue.put([ident, dumps(action)])
self.async_predictor.put_task([state], cb)
def _on_episode_over(self, ident):
self._parse_memory(0, ident, True)
def _on_datapoint(self, ident):
client = self.clients[ident]
if len(client.memory) == LOCAL_TIME_MAX + 1:
R = client.memory[-1].value
self._parse_memory(R, ident, False)
def _parse_memory(self, init_r, ident, isOver):
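        # Convert the buffered transitions into (state, action, discounted return)
        # training tuples: rewards are clipped to [-1, 1] and the return
        # R = clip(r) + GAMMA * R is accumulated while walking the memory in reverse.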
client = self.clients[ident]
mem = client.memory
if not isOver:
last = mem[-1]
mem = mem[:-1]
mem.reverse()
R = float(init_r)
for idx, k in enumerate(mem):
R = np.clip(k.reward, -1, 1) + GAMMA * R
self.queue.put([k.state, k.action, R])
if not isOver:
client.memory = [last]
else:
client.memory = []
def get_config(env):
assert NUM_ACTIONS is not None
dirname = os.path.join('train_log', 'pretrain-atari-{}'.format(ENV_NAME))
logger.set_logger_dir(dirname)
M = Model()
#name_base = str(uuid.uuid1())[:6]
#PIPE_DIR = os.environ.get('TENSORPACK_PIPEDIR', '.').rstrip('/')
#namec2s = 'ipc://{}/sim-c2s-{}'.format(PIPE_DIR, name_base)
#names2c = 'ipc://{}/sim-s2c-{}'.format(PIPE_DIR, name_base)
#procs = [MySimulatorWorker(k, namec2s, names2c) for k in range(SIMULATOR_PROC)]
#ensure_proc_terminate(procs)
#start_proc_mask_signal(procs)
#master = MySimulatorMaster(namec2s, names2c, M)
if env == 'Breakout-v0':
df = RecordsDataFlow('all')
else:
df = KurinDataFlow('all',
record_folder=BASE_PATH, gym_game_name=env)
dataflow = BatchData(df, BATCH_SIZE)
print('Pre-training dataset size: {}'.format(df.size()))
#print('Average human performance: {}'.format(df.avg_human_score))
return TrainConfig(
model=M,
dataflow=dataflow,
callbacks=[
ModelSaver(),
ScheduledHyperParamSetter('learning_rate', [(20, 0.0003), (120, 0.0001)]),
ScheduledHyperParamSetter('entropy_beta', [(80, 0.005)]),
ScheduledHyperParamSetter('explore_factor',
[(80, 2), (100, 3), (120, 4), (140, 5)]),
HumanHyperParamSetter('learning_rate'),
HumanHyperParamSetter('entropy_beta'),
#master,
#StartProcOrThread(master),
PeriodicTrigger(Evaluator(
EVAL_EPISODE, ['state'], ['policy'], get_player),
every_k_epochs=1),
],
session_creator=sesscreate.NewSessionCreator(
config=get_default_sess_config(0.5)),
steps_per_epoch=dataflow.size(),
max_epoch=5,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
parser.add_argument('--env', help='env', required=True)
parser.add_argument('--task', help='task to perform',
choices=['play', 'eval', 'train', 'gen_submit'], default='train')
parser.add_argument('--output', help='output directory for submission', default='output_dir')
parser.add_argument('--episode', help='number of episode to eval', default=100, type=int)
args = parser.parse_args()
ENV_NAME = args.env
assert ENV_NAME
logger.info("Environment Name: {}".format(ENV_NAME))
p = get_player()
del p # set NUM_ACTIONS
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.task != 'train':
assert args.load is not None
if args.task != 'train':
cfg = PredictConfig(
model=Model(),
session_init=get_model_loader(args.load),
input_names=['state'],
output_names=['policy'])
if args.task == 'play':
play_model(cfg, get_player(viz=0.01))
elif args.task == 'eval':
eval_model_multithread(cfg, args.episode, get_player)
elif args.task == 'gen_submit':
play_n_episodes(
get_player(train=False, dumpdir=args.output),
OfflinePredictor(cfg), args.episode)
# gym.upload(output, api_key='xxx')
else:
nr_gpu = get_nr_gpu()
if nr_gpu > 0:
if nr_gpu > 1:
predict_tower = list(range(nr_gpu))[-nr_gpu // 2:]
else:
predict_tower = [0]
PREDICTOR_THREAD = len(predict_tower) * PREDICTOR_THREAD_PER_GPU
train_tower = list(range(nr_gpu))[:-nr_gpu // 2] or [0]
logger.info("[BA3C] Train on gpu {} and infer on gpu {}".format(
','.join(map(str, train_tower)), ','.join(map(str, predict_tower))))
trainer = AsyncMultiGPUTrainer
else:
logger.warn("Without GPU this model will never learn! CPU is only useful for debug.")
nr_gpu = 0
PREDICTOR_THREAD = 1
predict_tower, train_tower = [0], [0]
trainer = QueueInputTrainer
config = get_config(args.env)
if args.load:
config.session_init = get_model_loader(args.load)
config.tower = train_tower
config.predict_tower = predict_tower
trainer(config).train()
| mit | -4,611,083,490,077,505,500 | 36.571885 | 105 | 0.598469 | false |
borgaster/SpaceWarsEvolved | main.py | 1 | 16816 | import time
from animation import *
from asteroidField import *
from background import *
from loader import *
from physics import *
from player import *
from powerup import *
import pygame
from pygame.locals import *
from rotatingMenu_img import *
from spacemenu import *
from starField import *
# default key bindings for the players
keyPresset1 = [K_LEFT,K_RIGHT,K_UP,K_DOWN, K_SPACE, K_m]
keyPresset2 = [K_a, K_d, K_w, K_s, K_x, K_r]
pygame.init()
def game(numkills,nave1,nave2):
SCREENSIZE = [800,600]
#screen = pygame.display.set_mode(SCREENSIZE,pygame.FULLSCREEN)
## uncomment for debug
screen = pygame.display.set_mode(SCREENSIZE)
pygame.mouse.set_visible(0)
clock = pygame.time.Clock()
#init background
background = Background(screen,'galaxy.jpg')
    #init starfield effect and asteroids
starfield = StarField(screen)
asteroidField = AsteroidField(screen)
    #init music
rand = random.randrange(0,2)
# if rand == 0:
# load_music('After Burner.mp3')
#else:
#load_music('Spybreak.mp3')
#load_music('Gundam.mp3')
#init players
player1 = Player((200,SCREENSIZE[1]/2),keyPresset1,1,nave1,numkills)
playerSprite1 = pygame.sprite.RenderPlain((player1))
player1.spin(90,3)
player2 = Player((SCREENSIZE[0]-200,SCREENSIZE[1]/2),keyPresset2,2,nave2,numkills)
playerSprite2 = pygame.sprite.RenderPlain((player2))
player2.spin(90,1)
#powerup stuff variables
powerups_on_screen = False
done = False
retval = 0
powerup_available = 0
    #vars only for the animation of the guy in the corner of the screen
i = random.randrange(1,4)
pickup_timer = 0
while not done:
clock.tick(40)
        #if there are no asteroids left, respawn them
current_asteroids = len(asteroidField.asteroidSprites)
if current_asteroids <= 0:
current_asteroids = asteroidField.refresh(asteroidField.num_asteroids +1)
if pickup_timer != 0:
elapsed = round(time.clock())
        ##draw player information
font = pygame.font.SysFont("consola", 20)
ScorePanel1 ="Player 1 - Lives: "+str(player1.statistics[0])+" "+"Score: "+str(player1.statistics[3])
scorePlayer1 = font.render(ScorePanel1, True, (255,255,255))
if nave2 != 0:
ScorePanel2 ="Player 2 - Lives: "+str(player2.statistics[0])+" Score: "+str(player2.statistics[3])
scorePlayer2 = font.render(ScorePanel2, True, (255,255,255))
        # draw info about the available powerups
font = pygame.font.SysFont("consola", 40)
PowerupPanel = ""
if powerups_on_screen == False:
poweruppanel = font.render(PowerupPanel, True, (0,255,0))
#############################
        ##MOVE PLAYERS
        #if only one player is playing
if nave2 == 0:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
done = True
elif event.key == keyPresset1[0]:
player1.dx = -10
player1.spin(90,1)
elif event.key == keyPresset1[1]:
player1.dx = 10
player1.spin(90,3)
elif event.key == keyPresset1[2]:
player1.dy = -10
player1.spin(90,0)
elif event.key == keyPresset1[3]:
player1.dy = 10
player1.spin(90,2)
elif event.type == KEYUP:
if event.key == keyPresset1[0]:
player1.dx = -3
elif event.key == keyPresset1[1]:
player1.dx = 3
elif event.key == keyPresset1[2]:
player1.dy = -3
elif event.key == keyPresset1[3]:
player1.dy = 3
elif event.key == keyPresset1[5]:
player1.changeWeapon()
        # two players are playing, capture all the keys
else:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
done = True
elif event.key == keyPresset1[0]:
player1.dx = -10
player1.spin(90,1)
elif event.key == keyPresset1[1]:
player1.dx = 10
player1.spin(90,3)
elif event.key == keyPresset1[2]:
player1.dy = -10
player1.spin(90,0)
elif event.key == keyPresset1[3]:
player1.dy = 10
player1.spin(90,2)
elif event.key == keyPresset2[0]:
player2.dx = -10
player2.spin(90,1)
elif event.key == keyPresset2[1]:
player2.dx = 10
player2.spin(90,3)
elif event.key == keyPresset2[2]:
player2.dy = -10
player2.spin(90,0)
elif event.key == keyPresset2[3]:
player2.dy = 10
player2.spin(90,2)
elif event.type == KEYUP:
if event.key == keyPresset1[0]:
player1.dx = -3
elif event.key == keyPresset1[1]:
player1.dx = 3
elif event.key == keyPresset1[2]:
player1.dy = -3
elif event.key == keyPresset1[3]:
player1.dy = 3
elif event.key == keyPresset1[5]:
player1.changeWeapon()
elif event.key == keyPresset2[0]:
player2.dx = -3
elif event.key == keyPresset2[1]:
player2.dx = 3
elif event.key == keyPresset2[2]:
player2.dy = -3
elif event.key == keyPresset2[3]:
player2.dy = 3
elif event.key == keyPresset2[5]:
player2.changeWeapon()
background.update()
starfield.update()
        #compute the activation time for a new powerup and its type
        #in single player mode there are only weapon powerups
activate_powerups = random.randrange(0,200)
if nave2 != 0:
powerup_type = random.randrange(1,4)
else:
powerup_type = 2
if activate_powerups == 150:
if powerups_on_screen == False:
powerup_available = powerup_type
if (powerup_type == 1):
PowerupPanel = "Health Powerup Available!"
poweruppanel = font.render(PowerupPanel, True, (0,255,0))
elif powerup_type == 2:
PowerupPanel = "Weapon Powerup Available!"
poweruppanel = font.render(PowerupPanel, True, (255,0,0))
else:
PowerupPanel = "Mines Available!!"
poweruppanel = font.render(PowerupPanel, True, (255,0,0))
powerup = Powerup(powerup_available,SCREENSIZE)
powerupSprite = pygame.sprite.RenderPlain((powerup))
powerups_on_screen = True
        ## POWERUP IS ALREADY ON SCREEN
########################
        #intersection calculations
        #compute laser collisions between players
kill = lasers(player1,player2,playerSprite1,playerSprite2,asteroidField)
        #if a player was killed, exit
if kill == 1:
done = True
kill = asteroids(player1,player2,playerSprite1,playerSprite2,asteroidField)
        #if a player was killed, exit
if kill == 1:
done = True
        #pick up powerups
if powerups_on_screen == True:
retval = pickup_powerup(powerup,powerupSprite,player1,playerSprite1,powerup_available)
if retval == 1:
retval = 0
powerups_on_screen = False
if powerup.tipo == 2 and powerup.damagefactor == 4:
pickup_timer = round(time.clock())
elapsed = pickup_timer
else:
retval = pickup_powerup(powerup,powerupSprite,player2,playerSprite2,powerup_available)
if retval == 1:
retval = 0
powerups_on_screen = False
if powerup.tipo == 2 and powerup.damagefactor == 4:
pickup_timer = round(time.clock())
elapsed = pickup_timer
#############################
        # Draw
        #draw player 1
screen.blit(scorePlayer1, (10, 740))
playerSprite1.update(screen)
playerSprite1.draw(screen)
player1.draw_health(screen)
player1.draw_stats(screen)
        #draw player 2
if nave2 != 0:
screen.blit(scorePlayer2, (10, 750))
playerSprite2.update(screen)
playerSprite2.draw(screen)
player2.draw_health(screen)
player2.draw_stats(screen)
#powerups
screen.blit(poweruppanel, (350, 10))
if powerups_on_screen == True:
powerupSprite.draw(screen)
        #draw powerup pickups
for sprite in weapon_pickups:
sprite.render(screen,False)
for sprite in health_pickups:
sprite.render(screen,False)
        #draw asteroids
asteroidField.update()
        #draw explosions
for sprite in explosoes:
sprite.render(screen,False)
        #draw humor pic
if pickup_timer != 0:
if (elapsed - pickup_timer) < 1.5:
toasty_pic, toasty_rect = load_image("toasty"+str(i)+".PNG", -1)
screen.blit(toasty_pic,(885,650))
else:
pickup_timer = 0
                #Changed the random call because the degree of randomness was low
                #this way we all show up more often :)
listagem=[1,2,3,4]
random.shuffle(listagem)
random.shuffle(listagem)
i = listagem[0]
pygame.display.flip()
        ##END OF WHILE
#####################################
stop_music()
pygame.display.set_mode([800,600])
return player1,player2
def main():
pygame.init()
SCREENSIZE = [800,600]
screen = pygame.display.set_mode(SCREENSIZE)
pygame.display.set_caption("Space War Evolved")
pygame.mouse.set_visible(0)
    #init music
#load_music('menu.mp3')
clock = pygame.time.Clock()
SP, rect = load_image("SP.png", -1)
MP, rect2 = load_image("MP.png", -1)
S, rect3 = load_image("S.png", -1)
H, rect4 = load_image("H.png", -1)
A, rect5 = load_image("A.png", -1)
E, rect6 = load_image("E.png", -1)
SP_red, rect = load_image("SP_red_35_433.png", -1)
MP_red, rect = load_image("MP_red_93_433.png", -1)
S_red, rect = load_image("S_red_151_478.png", -1)
H_red, rect = load_image("H_red_93_478.png", -1)
A_red, rect = load_image("A_red_151_433.png", -1)
E_red, rect = load_image("E_red_35_478.png", -1)
extra, rect = load_image("extra.png", -1)
multi = []
multi_images = load_sliced_sprites(221,34,'multi_player_anim_221x34.png')
single = []
single_images = load_sliced_sprites(243,34,'single_anim_243x34.png')
help = []
help_images = load_sliced_sprites(74,35,'help_anim_74x35.png')
about = []
about_images = load_sliced_sprites(112,29,'about_anim_112x29.png')
exit = []
exit_images = load_sliced_sprites(74,28,'exit_anim_74x28.png')
setkeys = []
setkeys_images = load_sliced_sprites(179,29,'setkeys_anim_179x29.png')
jiproj = []
jiproj_images = load_sliced_sprites(128,160,'ji_proj_128x160.png')
jiproj.append(AnimatedSprite(jiproj_images,129,31))
autores = []
autores_images = load_sliced_sprites(111,160,'autores.png')
autores.append(AnimatedSprite(autores_images,129,217))
moverCursor = load_sound('moverCursor.wav')
moverCursor.set_volume(0.2)
clock = pygame.time.Clock()
menu = RotatingMenu(x=520, y=295, radius=160, arc=pi, defaultAngle=pi/2.0)
background = Background(screen,'Stargate_menu.png')
menu.addItem(MenuItem(H))
menu.addItem(MenuItem(S))
menu.addItem(MenuItem(SP))
menu.addItem(MenuItem(MP))
menu.addItem(MenuItem(A))
menu.addItem(MenuItem(E))
menu.selectItem(2)
#Loop
while True:
#Handle events
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
return False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
moverCursor.play()
menu.selectItem(menu.selectedItemNumber + 1)
if event.key == pygame.K_RIGHT:
moverCursor.play()
menu.selectItem(menu.selectedItemNumber - 1)
if event.key == pygame.K_RETURN:
if menu.selectedItemNumber == 0:
option2()
elif menu.selectedItemNumber == 1:
option4()
elif menu.selectedItemNumber == 2:
option0()
elif menu.selectedItemNumber == 3:
option1()
elif menu.selectedItemNumber == 4:
option3()
elif menu.selectedItemNumber == 5:
option5()
return False
#Update stuff
background.update()
menu.update()
for sprite in jiproj:
sprite.render(screen,True)
for sprite in autores:
sprite.render(screen,True)
screen.blit(extra, (124,24))
if menu.selectedItemNumber == 0:
single = []
multi = []
exit = []
about = []
setkeys = []
screen.blit(H_red, (93,478))
help.append(AnimatedSprite(help_images,490,280))
elif menu.selectedItemNumber == 1:
single = []
help = []
exit = []
about = []
multi = []
screen.blit(S_red, (151,478))
setkeys.append(AnimatedSprite(setkeys_images,435,280))
elif menu.selectedItemNumber == 2:
help = []
multi = []
exit = []
about = []
setkeys = []
screen.blit(SP_red, (35,433))
single.append(AnimatedSprite(single_images,403,280))
elif menu.selectedItemNumber == 3:
single = []
help = []
exit = []
about = []
setkeys = []
screen.blit(MP_red, (93,433))
multi.append(AnimatedSprite(multi_images,410,280))
elif menu.selectedItemNumber == 4:
single = []
multi = []
exit = []
help = []
setkeys = []
screen.blit(A_red, (151,433))
about.append(AnimatedSprite(about_images,470,280))
elif menu.selectedItemNumber == 5:
single = []
multi = []
help = []
about = []
setkeys = []
screen.blit(E_red, (35,478))
exit.append(AnimatedSprite(exit_images,490,280))
for sprite in multi:
sprite.render(screen,True)
for sprite in single:
sprite.render(screen,True)
for sprite in about:
sprite.render(screen,True)
for sprite in exit:
sprite.render(screen,True)
for sprite in help:
sprite.render(screen,True)
for sprite in setkeys:
sprite.render(screen,True)
#Draw stuff
#display.fill((0,0,0))
menu.draw(screen)
pygame.display.flip() #Show the updated scene
clock.tick(fpsLimit) #Wait a little
if __name__ == "__main__":
main()
| mit | -5,715,220,330,823,720,000 | 33.178862 | 110 | 0.506185 | false |
kgullikson88/GSSP_Analyzer | gsspy/fitting.py | 1 | 19991 | from __future__ import print_function, division, absolute_import
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import subprocess
from astropy.io import fits
from astropy import time
import DataStructures
from ._utils import combine_orders, read_grid_points, ensure_dir
from .analyzer import GSSP_Analyzer
import logging
import glob
home = os.environ['HOME']
GSSP_EXE = '{}/Applications/GSSP/GSSP_single/GSSP_single'.format(home)
GSSP_ABUNDANCE_TABLES = '{}/Applications/GSSPAbundance_Tables/'.format(home)
GSSP_MODELS = '/media/ExtraSpace/GSSP_Libraries/LLmodels/'
class GSSP_Fitter(object):
teff_minstep = 100
logg_minstep = 0.1
feh_minstep = 0.1
vsini_minstep = 10
vmicro_minstep = 0.1
def __init__(self, filename, gssp_exe=None, abund_tab=None, models_dir=None):
"""
A python wrapper to the GSSP code (must already be installed)
Parameters:
===========
filename: string
The filename of the (flattened) fits spectrum to fit.
gssp_exe: string (optional)
The full path to the gssp executable file
abund_tab: string (optional)
The full path to the directory containing
GSSP abundance tables.
models_dir: string:
The full path to the directory containing
GSSP atmosphere models.
Methods:
==========
fit: Fit the parameters
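        Example:
        ==========
        Minimal usage sketch (the file name and parameter limits below are
        illustrative assumptions, not values taken from this project):
            fitter = GSSP_Fitter('star_spectrum.fits')
            best_pars = fitter.fit(teff_lims=(9000, 12000), teff_step=500,
                                   logg_lims=(3.5, 4.5), logg_step=0.5,
                                   R=80000, ncores=4)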
"""
if gssp_exe is None:
gssp_exe = GSSP_EXE
if abund_tab is None:
abund_tab = GSSP_ABUNDANCE_TABLES
if models_dir is None:
models_dir = GSSP_MODELS
# Read in the file and combine the orders
orders = self._read_fits_file(filename)
combined = combine_orders(orders)
#TODO: Cross-correlate the data to get it close. GSSP might have trouble with huge RVs...
# Get the object name/date
header = fits.getheader(filename)
star = header['OBJECT']
date = header['DATE-OBS']
try:
jd = time.Time(date, format='isot', scale='utc').jd
except TypeError:
jd = time.Time('{}T{}'.format(date, header['UT']), format='isot',
scale='utc').jd
# Save the data to an ascii file
output_basename = '{}-{}'.format(star.replace(' ', ''), jd)
np.savetxt('data_sets/{}.txt'.format(output_basename),
np.transpose((combined.x, combined.y)),
fmt='%.10f')
# Save some instance variables
self.data = combined
self.jd = jd
self.starname = star
self.output_basename = output_basename
self.gssp_exe = os.path.abspath(gssp_exe)
self.abundance_table = abund_tab
self.model_dir = models_dir
self.gssp_gridpoints = read_grid_points(models_dir)
def _run_gssp(self, teff_lims=(7000, 30000), teff_step=1000,
logg_lims=(3.0, 4.5), logg_step=0.5,
feh_lims=(-0.5, 0.5), feh_step=0.5,
vsini_lims=(50, 350), vsini_step=50,
vmicro_lims=(1, 5), vmicro_step=1,
R=80000, ncores=1):
"""
        Run GSSP over the given grid of Teff, log(g), [Fe/H], vsini and vmicro.
"""
# First, make sure the inputs are reasonable.
teff_step = max(teff_step, self.teff_minstep)
logg_step = max(logg_step, self.logg_minstep)
feh_step = max(feh_step, self.feh_minstep)
vsini_step = max(vsini_step, self.vsini_minstep)
vmicro_step = max(vmicro_step, self.vmicro_minstep)
teff_lims = (min(teff_lims), max(teff_lims))
logg_lims = (min(logg_lims), max(logg_lims))
feh_lims = (min(feh_lims), max(feh_lims))
vsini_lims = (min(vsini_lims), max(vsini_lims))
vmicro_lims = (min(vmicro_lims), max(vmicro_lims))
teff_lims, logg_lims, feh_lims = self._check_grid_limits(teff_lims,
logg_lims,
feh_lims)
# Make the input file for GSSP
inp_file=self._make_input_file(teff_lims=teff_lims, teff_step=teff_step,
logg_lims=logg_lims, logg_step=logg_step,
feh_lims=feh_lims, feh_step=feh_step,
vsini_lims=vsini_lims, vsini_step=vsini_step,
vmicro_lims=vmicro_lims, vmicro_step=vmicro_step,
resolution=R)
# Run GSSP
subprocess.check_call(['mpirun', '-n', '{}'.format(ncores),
'{}'.format(self.gssp_exe),
'{}'.format(inp_file)])
# Move the output directory to a new name that won't be overridden
output_dir = '{}_output'.format(self.output_basename)
ensure_dir(output_dir)
for f in glob.glob('output_files/*'):
subprocess.check_call(['mv', f, '{}/'.format(output_dir)])
return
def fit(self, teff_lims=(7000, 30000), teff_step=1000,
logg_lims=(3.0, 4.5), logg_step=0.5,
feh_lims=(-0.5, 0.5), feh_step=0.5,
vsini_lims=(50, 350), vsini_step=50,
vmicro_lims=(1, 5), vmicro_step=1,
R=80000, ncores=1, refine=True):
"""
Fit the stellar parameters with GSSP
Parameters:
=============
par_lims: iterable with (at least) two objects
The limits on the given parameter. 'par' can be one of:
1. teff: The effective temperature
2. logg: The surface gravity
3. feh: The metallicity [Fe/H]
4. vsini: The rotational velocity
5. vmicro: The microturbulent velocity
The default values are a very large, very course grid.
Consider refining based on spectral type first!
par_step: float
The initial step size to take in the given parameter.
'par' can be from the same list as above.
R: float
The spectrograph resolving power (lambda/delta-lambda)
ncores: integer, default=1
The number of cores to use in the GSSP run.
refine: boolean
Should we run GSSP again with a smaller grid after the
initial fit? If yes, the best answers will probably be
better.
Returns:
=========
A pd.Series object with the best parameters
"""
# Run GSSP
self._run_gssp(teff_lims=teff_lims, teff_step=teff_step,
logg_lims=logg_lims, logg_step=logg_step,
feh_lims=feh_lims, feh_step=feh_step,
vsini_lims=vsini_lims, vsini_step=vsini_step,
vmicro_lims=vmicro_lims, vmicro_step=vmicro_step,
R=R, ncores=ncores)
# Look at the output and save the figures
output_dir = '{}_output'.format(self.output_basename)
best_pars, figs = GSSP_Analyzer(output_dir).estimate_best_parameters()
for par in figs.keys():
fig = figs[par]
fig.savefig(os.path.join(output_dir, '{}_course.pdf'.format(par)))
plt.close('all')
if not refine:
return best_pars
# If we get here, we should restrict the grid near the
# best solution and fit again
teff_lims = self._get_refined_limits(lower=best_pars['1sig_CI_lower_Teff'],
upper=best_pars['1sig_CI_upper_Teff'],
values=self.gssp_gridpoints.teff)
logg_lims = self._get_refined_limits(lower=best_pars['1sig_CI_lower_logg'],
upper=best_pars['1sig_CI_upper_logg'],
values=self.gssp_gridpoints.logg)
feh_lims = self._get_refined_limits(lower=best_pars['1sig_CI_lower_feh'],
upper=best_pars['1sig_CI_upper_feh'],
values=self.gssp_gridpoints.feh)
vsini_lower = best_pars.best_vsini*(1-1.5) + 1.5*best_pars['1sig_CI_lower_vsini']
vsini_upper = best_pars.best_vsini*(1-1.5) + 1.5*best_pars['1sig_CI_upper_vsini']
vsini_lims = (max(10, vsini_lower), min(400, vsini_upper))
vsini_step = max(self.vsini_minstep, (vsini_lims[1] - vsini_lims[0])/10)
vmicro_lims = (best_pars.micro_turb, best_pars.micro_turb)
# Rename the files in the output directory so they don't get overwritten
file_list = ['CCF.dat', 'Chi2_table.dat',
'Observed_spectrum.dat', 'Synthetic_best_fit.rgs']
ensure_dir(os.path.join(output_dir, 'course_output', ''))
for f in file_list:
original_fname = os.path.join(output_dir, f)
new_fname = os.path.join(output_dir, 'course_output', f)
subprocess.check_call(['mv', original_fname, new_fname])
# Run GSSP on the refined grid
self._run_gssp(teff_lims=teff_lims, teff_step=self.teff_minstep,
logg_lims=logg_lims, logg_step=self.logg_minstep,
feh_lims=feh_lims, feh_step=self.feh_minstep,
vsini_lims=vsini_lims, vsini_step=round(vsini_step),
vmicro_lims=vmicro_lims, vmicro_step=vmicro_step,
R=R, ncores=ncores)
best_pars, figs = GSSP_Analyzer(output_dir).estimate_best_parameters()
for par in figs.keys():
fig = figs[par]
fig.savefig(os.path.join(output_dir, '{}_fine.pdf'.format(par)))
            plt.close(fig)
return best_pars
def _check_grid_limits_old(self, teff_lims, logg_lims, feh_lims):
df = self.gssp_gridpoints[['teff', 'logg', 'feh']].drop_duplicates()
# First, check if the limits are do-able
lower = df.loc[(df.teff <= teff_lims[0]) &
(df.logg <= logg_lims[0]) &
(df.feh <= feh_lims[0])]
upper = df.loc[(df.teff >= teff_lims[1]) &
(df.logg >= logg_lims[1]) &
(df.feh >= feh_lims[1])]
if len(upper) >= 1 and len(lower) >= 1:
return teff_lims, logg_lims, feh_lims
# If we get here, there is a problem...
# Check temperature first:
if not (len(df.loc[df.teff <= teff_lims[0]]) >= 1 and
len(df.loc[df.teff >= teff_lims[1]]) >= 1):
# Temperature grid is no good.
low_teff, high_teff = df.teff.min(), df.teff.max()
print('The temperature grid is not available in the model library!')
print('You wanted temperatures from {} - {}'.format(*teff_lims))
print('The model grid extends from {} - {}'.format(low_teff, high_teff))
new_teff_lims = (max(low_teff, teff_lims[0]),
min(high_teff, teff_lims[1]))
print('Resetting temperature limits to {} - {}'.format(*new_teff_lims))
return self._check_grid_limits(new_teff_lims, logg_lims, feh_lims)
# Check log(g) next:
teff_df = df.loc[(df.teff >= teff_lims[0]) & (df.teff <= teff_lims[1])]
if not (len(teff_df.loc[df.logg <= logg_lims[0]]) >= 1 and
len(teff_df.loc[df.logg >= logg_lims[1]]) >= 1):
# Temperature grid is no good.
low_logg, high_logg = df.logg.min(), df.logg.max()
print('The log(g) grid is not available in the model library!')
print('You wanted log(g) from {} - {}'.format(*logg_lims))
print('The model grid extends from {} - {}'.format(low_logg, high_logg))
new_logg_lims = (max(low_logg, logg_lims[0]),
min(high_logg, logg_lims[1]))
print('Resetting log(g) limits to {} - {}'.format(*new_logg_lims))
return self._check_grid_limits(teff_lims, new_logg_lims, feh_lims)
# Finally, check [Fe/H]:
subset_df = df.loc[(df.teff >= teff_lims[0]) &
(df.teff <= teff_lims[1]) *
(df.logg >= logg_lims[0]) &
(df.logg <= logg_lims[1])]
if not (len(subset_df.loc[df.feh <= feh_lims[0]]) >= 1 and
len(subset_df.loc[df.feh >= feh_lims[1]]) >= 1):
# Temperature grid is no good.
low_feh, high_feh = df.feh.min(), df.feh.max()
print('The [Fe/H] grid is not available in the model library!')
print('You wanted [Fe/H] from {} - {}'.format(*feh_lims))
print('The model grid extends from {} - {}'.format(low_feh, high_feh))
new_feh_lims = (max(low_feh, feh_lims[0]),
min(high_feh, feh_lims[1]))
print('Resetting [Fe/H] limits to {} - {}'.format(*new_feh_lims))
return self._check_grid_limits(teff_lims, logg_lims, new_feh_lims)
# We should never get here
raise ValueError('Something weird happened while checking limits!')
def _check_grid_limits(self, teff_lims, logg_lims, feh_lims):
df = self.gssp_gridpoints[['teff', 'logg', 'feh']].drop_duplicates()
# First, check if the limits are do-able as is
lower = df.loc[(df.teff == teff_lims[0]) & (df.feh == feh_lims[0])]
upper = df.loc[(df.teff == teff_lims[1]) & (df.feh == feh_lims[1])]
if (lower.logg.min() <= logg_lims[0] and
lower.logg.max() >= logg_lims[1] and
upper.logg.min() <= logg_lims[0] and
upper.logg.max() >= logg_lims[1]):
return teff_lims, logg_lims, feh_lims
# If we get here, there is a problem...
# Check temperature first:
low_teff, high_teff = df.teff.min(), df.teff.max()
if low_teff > teff_lims[0] or high_teff < teff_lims[1]:
print('The temperature grid is not available in the model library!')
print('You wanted temperatures from {} - {}'.format(*teff_lims))
print('The model grid extends from {} - {}'.format(low_teff, high_teff))
new_teff_lims = (max(low_teff, teff_lims[0]),
min(high_teff, teff_lims[1]))
print('Resetting temperature limits to {} - {}'.format(*new_teff_lims))
return self._check_grid_limits(new_teff_lims, logg_lims, feh_lims)
# Check [Fe/H] next
subset_df = df.loc[(df.teff >= teff_lims[0]) &
(df.teff <= teff_lims[1])]
low_feh, high_feh = subset_df.feh.min(), subset_df.feh.max()
if low_feh > feh_lims[0] or high_feh < feh_lims[1]:
print('The [Fe/H] grid is not available in the model library!')
print('You wanted [Fe/H] from {} - {}'.format(*feh_lims))
print('The model grid extends from {} - {}'.format(low_feh, high_feh))
new_feh_lims = (max(low_feh, feh_lims[0]),
min(high_feh, feh_lims[1]))
print('Resetting [Fe/H] limits to {} - {}'.format(*new_feh_lims))
return self._check_grid_limits(teff_lims, logg_lims, new_feh_lims)
# Finally, check log(g)
subset_df = subset_df.loc[(subset_df.feh >= feh_lims[0]) &
(subset_df.feh <= feh_lims[1])]
low_logg, high_logg = subset_df.logg.min(), subset_df.logg.max()
if low_logg > logg_lims[0] or high_logg < logg_lims[1]:
print('The log(g) grid is not available in the model library!')
print('You wanted log(g) from {} - {}'.format(*logg_lims))
print('The model grid extends from {} - {}'.format(low_logg, high_logg))
new_logg_lims = (max(low_logg, logg_lims[0]),
min(high_logg, logg_lims[1]))
print('Resetting log(g) limits to {} - {}'.format(*new_logg_lims))
return self._check_grid_limits(teff_lims, new_logg_lims, feh_lims)
# We should never get here
raise ValueError('Something weird happened while checking limits!')
def _get_refined_limits(self, lower, upper, values):
"""
Get the items in the 'values' array that are just
less than lower and just more than upper.
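        Example (hypothetical numbers): with values=[3.0, 3.5, 4.0, 4.5],
        lower=3.6 and upper=4.2, this returns (3.5, 4.5).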
"""
unique_values = sorted(np.unique(values))
l_idx = np.searchsorted(unique_values, lower, side='left')
r_idx = np.searchsorted(unique_values, upper, side='right')
if l_idx > 0:
l_idx -= 1
if r_idx < len(unique_values) - 1:
r_idx += 1
return unique_values[l_idx], unique_values[r_idx]
def _read_fits_file(self, fname):
orders = []
hdulist = fits.open(fname)
for i, hdu in enumerate(hdulist[1:]):
xypt = DataStructures.xypoint(x=hdu.data['wavelength'],
y=hdu.data['flux'],
cont=hdu.data['continuum'],
err=hdu.data['error'])
xypt.x *= 10 #Convert from nanometers to angstrom
orders.append(xypt)
return orders
def _make_input_file(self, teff_lims, teff_step, logg_lims, logg_step,
feh_lims, feh_step, vsini_lims, vsini_step,
vmicro_lims, vmicro_step, resolution):
""" Make the input file for the given star
"""
output_string = '{:.1f} {:.0f} {:.1f}\n'.format(teff_lims[0],
teff_step,
teff_lims[-1])
output_string += '{:.1f} {:.1f} {:.1f}\n'.format(logg_lims[0],
logg_step,
logg_lims[1])
output_string += '{:.1f} {:.1f} {:.1f}\n'.format(vmicro_lims[0],
vmicro_step,
vmicro_lims[1])
output_string += '{:.1f} {:.1f} {:.1f}\n'.format(vsini_lims[0],
vsini_step,
vsini_lims[1])
output_string += "skip 0.03 0.02 0.07 !dilution factor\n"
output_string += 'skip {:.1f} {:.1f} {:.1f}\n'.format(feh_lims[0],
feh_step,
feh_lims[1])
output_string += 'He 0.04 0.005 0.06 ! Individual abundance\n'
output_string += '0.0 {:.0f}\n'.format(resolution)
output_string += '{}\n{}\n'.format(self.abundance_table, self.model_dir)
output_string += '2 1 !atmosphere model vmicro and mass\n'
output_string += 'ST ! model atmosphere chemical composition flag\n'
dx = self.data.x[1] - self.data.x[0]
output_string += '1 {:.5f} fit\n'.format(dx)
output_string += 'data_sets/{}.txt\n'.format(self.output_basename)
output_string += '0.5 0.99 0.0 adjust ! RV determination stuff\n'
xmin, xmax = self.data.x[0]-1, self.data.x[-1]+1
output_string += '{:.1f} {:.1f}\n'.format(xmin, xmax)
outfilename = '{}.inp'.format(self.output_basename)
with open(outfilename, 'w') as outfile:
outfile.write(output_string)
return outfilename
| mit | -6,591,718,950,300,025,000 | 43.523385 | 97 | 0.516182 | false |
gussmith23/babble_bot | mangle.py | 1 | 3519 | import random
import configparser
from enum import Enum
import googletrans
# get config
config = configparser.ConfigParser()
config.read("babble_bot.cfg")
class MangleMethod(Enum):
flipflop = 1
straight = 2
manual = 3
def __str__(self):
if self == MangleMethod.flipflop:
return "flip flop: flip flop between a primary language and random languages."
elif self == MangleMethod.straight:
return "straight: run through a completely random list of languages."
elif self == MangleMethod.manual:
return "manual: language path specified by the user manually."
else:
raise NotImplementedError(
"MangleMethod value's __str__ conversion not implemented.")
class Mangle:
def __init__(self, client_key, language, low, high, language_blacklist):
self.language = language
self.translator = googletrans.Translator()
self.languages = set(googletrans.LANGUAGES.keys()) - language_blacklist
self.low = low
self.high = high
def mangle(self, message_text, times=0, method=None, language_list=None):
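        """Translate message_text through a chain of languages and back.
        times: number of intermediate translations to use (0 picks a random
               count between the configured low and high bounds).
        method: a MangleMethod; if None, flipflop or straight is picked at random.
        language_list: the language path, required only for MangleMethod.manual.
        Returns a dict with the method used, the language path and all
        intermediate messages ('all_messages' is False if a translation failed).
        """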
if method == MangleMethod.manual and not language_list:
raise ValueError("No language list given.")
if method is None:
method = random.sample(
set(MangleMethod) - set([MangleMethod.manual]), 1)[0]
if times < 0:
raise ValueError("Parameter times must be greater than 0.")
if times == 0:
times = random.randint(self.low, self.high)
if method == MangleMethod.manual:
language_list.insert(0, self.language)
language_list.append(self.language)
elif method == MangleMethod.flipflop:
language_list = []
language_list.append(self.language)
for i in range(int(times / 2)):
language_list.extend(
[random.sample(self.languages, 1)[0], self.language])
elif method == MangleMethod.straight:
language_list = []
language_list.append(self.language)
language_list.extend(random.sample(self.languages, times))
language_list.append(self.language)
else:
raise NotImplementedError(
"MangleMethod {} not implemented.".format(method))
all_messages = [message_text]
for i in range(len(language_list)):
if i == 0:
continue
try:
#text = self.translator.translate(all_messages[i - 1],
# from_lang = language_list[i - 1],
# to_lang = language_list[i])
params = {
'text': all_messages[i - 1],
'from': language_list[i - 1],
'to': language_list[i],
'contentType': 'text/plain',
'category': 'general',
}
text = self.translator.translate(params['text'],
src=params['from'],
dest=params['to']).text
all_messages.append(text)
except Exception as e:
all_messages = False
break
message_info = {
'method': str(method),
'languages': language_list,
'all_messages': all_messages
}
return message_info
| gpl-3.0 | 7,179,359,425,777,197,000 | 36.042105 | 90 | 0.539926 | false |
JuBra/GEMEditor | GEMEditor/database/ui/MetaboliteEntryDisplayWidget.py | 1 | 8059 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\MetaboliteEntryDisplayWidget.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MetaboliteEntryDisplayWidget(object):
def setupUi(self, MetaboliteEntryDisplayWidget):
MetaboliteEntryDisplayWidget.setObjectName("MetaboliteEntryDisplayWidget")
MetaboliteEntryDisplayWidget.resize(333, 465)
self.formLayout = QtWidgets.QFormLayout(MetaboliteEntryDisplayWidget)
self.formLayout.setObjectName("formLayout")
self.label = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label)
self.label_name = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_name.sizePolicy().hasHeightForWidth())
self.label_name.setSizePolicy(sizePolicy)
self.label_name.setText("")
self.label_name.setWordWrap(True)
self.label_name.setObjectName("label_name")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.label_name)
self.label_4 = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_4.sizePolicy().hasHeightForWidth())
self.label_4.setSizePolicy(sizePolicy)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_4)
self.label_formula = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_formula.sizePolicy().hasHeightForWidth())
self.label_formula.setSizePolicy(sizePolicy)
self.label_formula.setText("")
self.label_formula.setObjectName("label_formula")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.label_formula)
self.label_2 = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.label_charge = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_charge.sizePolicy().hasHeightForWidth())
self.label_charge.setSizePolicy(sizePolicy)
self.label_charge.setText("")
self.label_charge.setObjectName("label_charge")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.label_charge)
self.label_3 = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())
self.label_3.setSizePolicy(sizePolicy)
self.label_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.list_synonyms = QtWidgets.QListWidget(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.list_synonyms.sizePolicy().hasHeightForWidth())
self.list_synonyms.setSizePolicy(sizePolicy)
self.list_synonyms.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.list_synonyms.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.list_synonyms.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.list_synonyms.setLayoutMode(QtWidgets.QListView.SinglePass)
self.list_synonyms.setObjectName("list_synonyms")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.list_synonyms)
self.label_5 = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_5.sizePolicy().hasHeightForWidth())
self.label_5.setSizePolicy(sizePolicy)
self.label_5.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label_5.setObjectName("label_5")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_5)
self.table_identifiers = AnnotationTableWidget(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.table_identifiers.sizePolicy().hasHeightForWidth())
self.table_identifiers.setSizePolicy(sizePolicy)
self.table_identifiers.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.table_identifiers.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.table_identifiers.setWordWrap(False)
self.table_identifiers.setObjectName("table_identifiers")
self.table_identifiers.setColumnCount(0)
self.table_identifiers.setRowCount(0)
self.table_identifiers.horizontalHeader().setStretchLastSection(True)
self.table_identifiers.verticalHeader().setVisible(False)
self.table_identifiers.verticalHeader().setHighlightSections(False)
self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.table_identifiers)
self.retranslateUi(MetaboliteEntryDisplayWidget)
QtCore.QMetaObject.connectSlotsByName(MetaboliteEntryDisplayWidget)
def retranslateUi(self, MetaboliteEntryDisplayWidget):
_translate = QtCore.QCoreApplication.translate
MetaboliteEntryDisplayWidget.setWindowTitle(_translate("MetaboliteEntryDisplayWidget", "Form"))
self.label.setText(_translate("MetaboliteEntryDisplayWidget", "Name:"))
self.label_4.setText(_translate("MetaboliteEntryDisplayWidget", "Formula:"))
self.label_2.setText(_translate("MetaboliteEntryDisplayWidget", "Charge:"))
self.label_3.setText(_translate("MetaboliteEntryDisplayWidget", "Synonyms:"))
self.label_5.setText(_translate("MetaboliteEntryDisplayWidget", "Identifier:"))
from GEMEditor.base.widgets import AnnotationTableWidget
| gpl-3.0 | 1,646,370,832,036,293,600 | 61.960938 | 106 | 0.755056 | false |
kickstandproject/wildcard | wildcard/dashboards/settings/password/tests.py | 1 | 3365 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Centrin Data Systems Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import NoReverseMatch # noqa
from django.core.urlresolvers import reverse # noqa
from django import http
from mox import IsA # noqa
from wildcard import api
from wildcard.test import helpers as test
# TODO(mrunge): remove, when keystone v3 supports
# change_own_password, incl. password validation
kver = api.keystone.VERSIONS.active
if kver == 2:
INDEX_URL = reverse('horizon:settings:password:index')
class ChangePasswordTests(test.TestCase):
@test.create_stubs({api.keystone: ('user_update_own_password', )})
def test_change_password(self):
if kver == 3:
self.skipTest('Password change in keystone v3 unsupported')
api.keystone.user_update_own_password(IsA(http.HttpRequest),
'oldpwd',
'normalpwd',).AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'PasswordForm',
'current_password': 'oldpwd',
'new_password': 'normalpwd',
'confirm_password': 'normalpwd'}
res = self.client.post(INDEX_URL, formData)
self.assertNoFormErrors(res)
def test_change_validation_passwords_not_matching(self):
if kver == 3:
self.skipTest('Password change in keystone v3 unsupported')
formData = {'method': 'PasswordForm',
'current_password': 'currpasswd',
'new_password': 'testpassword',
'confirm_password': 'doesnotmatch'}
res = self.client.post(INDEX_URL, formData)
self.assertFormError(res, "form", None, ['Passwords do not match.'])
@test.create_stubs({api.keystone: ('user_update_own_password', )})
def test_change_password_shows_message_on_login_page(self):
if kver == 3:
self.skipTest('Password change in keystone v3 unsupported')
api.keystone.user_update_own_password(IsA(http.HttpRequest),
'oldpwd',
'normalpwd').AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'PasswordForm',
'current_password': 'oldpwd',
'new_password': 'normalpwd',
'confirm_password': 'normalpwd'}
res = self.client.post(INDEX_URL, formData, follow=True)
info_msg = "Password changed. Please log in again to continue."
self.assertContains(res, info_msg)
def test_on_keystone_v3_disabled(self):
try:
reverse('horizon:settings:password:index')
except NoReverseMatch:
pass
| apache-2.0 | 1,859,885,490,701,099,800 | 38.588235 | 78 | 0.615156 | false |
highlander12rus/whatsupmoscow.ru | demon/main.py | 1 | 3580 | # -*- coding: utf-8 -*-
__author__ = 'meanwhile'
import ssl
import time
import socket
import sys
import logging
import vkontakte
import ProvaderStorage
import Constants
import FileWriter
import ProccessingResponce
import daemon
class VkParserDemon(daemon.Daemon):
def run(self):
        #read code for the vk execute method from a file
codeFromFile = ''
with open(Constants.Constants.getFileCodeExecute(), 'r') as f:
codeFromFile = f.read()
#read access token from file
access_tokens = [];
with open(Constants.Constants.getFileAccessToken(), 'r') as f:
access_tokens = [token.strip() for token in f]
isValidToken = False;
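        # Try each token in turn; a failing token is re-appended to the list so it gets retried after the others.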
for acces_token in access_tokens:
try:
vk = vkontakte.API(token=acces_token)
                vk.getServerTime() #check that the connection actually works
isValidToken = True
break
except vkontakte.VKError, e:
logging.error("vkontakte.VKError ")
except ssl.SSLError, e: #The handshake operation timed out
logging.error("ssl error")
time.sleep(1)
access_tokens.append(acces_token)
if (isValidToken):
storage = ProvaderStorage.ProvaderStorage()
lastTime = vk.getServerTime()
emptyLastTime = 0;
while True:
try:
time.sleep(Constants.Constants.getTimeOutInSec())
codeSending = codeFromFile.replace('%time_replace%', str(lastTime))
json = vk.execute(code=codeSending, timeout=10)
logging.debug("vk_json responce ", json)
fileName = Constants.Constants.getDirHomeScript() + str(time.strftime("%d-%m-%Y")) + ".vkr" #vk raw
file = FileWriter.FileWriterBinary(fileName)
process = ProccessingResponce.ProccessingResponce(storage, file)
process.jsonParse(json)
if json['max_time'] > 0:
lastTime = json['max_time'] + 1
else:
logging.debug("empty json= ", json)
logging.debug("lastTime= ", lastTime)
logging.debug("complidet proccessing")
except ssl.SSLError, e:
logging.error("ssl error")
except socket.timeout, e:
logging.error("socket.timeout")
except vkontakte.VKError, e:
logging.error("vkontakte.VKError")
except AttributeError, e:
logging.error("AttributeError")
else:
#TODO: send emails tokens no correct
logging.error("token uncorrect")
if __name__ == "__main__":
logging.basicConfig(format=u'%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s',
level=logging.ERROR)
daemon = VkParserDemon('/tmp/daemon-example.pid', stdout='/var/log/vk_parser/stdout.log',
stderr='/var/log/vk_parser/error.log')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
| apache-2.0 | 35,188,145,789,785,108 | 34.58 | 119 | 0.540472 | false |
rmac75/mboxparser | mbox.py | 1 | 3399 | #!/usr/bin/python2
#--------------------------------
#Takes in mbox, spits out csv with email info and basic geolocation, plus other header fields.
#--------------------------------
#This product includes GeoLite2 data created by MaxMind, available from
#<a href="http://www.maxmind.com">http://www.maxmind.com</a>.
import mailbox
import sys
import csv
import re
from os import path
import pprint
import argparse
import geoip2.database
import geoip2.errors
import pygeoip
import email.utils
from email.utils import getaddresses
def get_iprecord(ip):
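    # City-level geolocation plus organisation lookup for an IP; returns (None, None, None)
    # when the address is not found in the databases.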
try:
geo = reader.city(ip)
org = reader2.org_by_addr(ip)
except (geoip2.errors.AddressNotFoundError, ValueError):
return None,None,None
if geo.city.name:
cityname=geo.city.name.encode('ascii','ignore')
else:
cityname=geo.city.name
return geo.country.iso_code, cityname, org
def main():
# first some sanity tests on the command-line arguments
#sys.argv = ['mbox_to_mysql','list1.mbox','mailman','lists',] # !@!@! APS here for testing purposes only - comment out for live run
parser = argparse.ArgumentParser(description='Parse mbox file')
parser.add_argument('mbox', help='mbox file to parse')
parser.add_argument('outfile', help='output csv file')
args = parser.parse_args()
if not path.isfile(args.mbox):
parser.error("the file %s does not exist"%args.mbox)
mbox = args.mbox
outfile = args.outfile
ipPattern = re.compile('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
global reader
reader = geoip2.database.Reader('geo/GeoLite2-City.mmdb')
global reader2
reader2 = pygeoip.GeoIP('geo/GeoIPOrg.dat')
f = open(outfile, 'wt')
try:
writer = csv.writer(f)
writer.writerow( ('Date','From','From Email','Return-Path Email','To','To Email','Recipients','X-To','Subject','Received-Last','Org','City', 'Country','X-IP','X-Org', 'X-City', 'X-Country','X-Mailer'))
for message in mailbox.mbox(mbox):
From = str(message['From'])
fname,femail = email.utils.parseaddr(From)
#print fname
Return = str(message['Return-Path'])
rname,remail = email.utils.parseaddr(Return)
#print remail
To = str(message['To'])
tname,temail = email.utils.parseaddr(To)
tos = message.get_all('to', [])
ccs = message.get_all('cc', [])
resent_tos = message.get_all('resent-to', [])
resent_ccs = message.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
XTo = str(message['X-Apparently-To'])
#findIP = re.findall(ipPattern,s)
Date = str(message['Date'])
Subject = str(message['Subject'])
Received = re.findall(ipPattern,str(message['Received']))
if Received:
#print Received[-1]
country, city, org = get_iprecord(Received[-1])
#print get_iprecord(Received[-1])
#print org
else:
Received = "None"
XIP = message['X-Originating-IP']
if XIP:
XIP = str(XIP).strip('[]')
#print ("XIP: %s." % XIP)
Xcountry, Xcity, Xorg = get_iprecord(XIP)
else:
XIP = "None"
Xcountry = "None"
Xcity = "None"
Xorg = "None"
XMailer = str(message['X-Mailer'])
#Attachment = message.get_filename()
#Body = str(message['Body'])
writer.writerow((Date,fname,femail,remail,tname,temail,all_recipients,XTo,Subject,Received[-1],org,city,country,XIP,Xorg,Xcity,Xcountry,XMailer))
finally:
f.close()
#print open(sys.argv[1], 'rt').read()
if __name__ == '__main__':
main()
| gpl-2.0 | -8,993,837,695,608,072,000 | 28.556522 | 204 | 0.657546 | false |
enriquecoronadozu/HMPy | src/borrar/modificar/hmpy.py | 1 | 6228 | #!/usr/bin/env python
"""@See preprocessed data
"""
from numpy import *
import matplotlib.pyplot as plt
from GestureModel import*
from Creator import*
from Classifier import*
def plotResults(gr_points,gr_sig, b_points,b_sig,name_model):
from scipy import linalg
import matplotlib.pyplot as plt
gr_points = gr_points.transpose()
b_points = b_points.transpose()
gr_sigma = []
b_sigma = []
n,m = gr_points.shape
maximum = zeros((m))
minimum = zeros((m))
x = arange(0,m,1)
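    # gr_sig / b_sig store the covariances as stacked 3-row blocks; split them into one 3x3 matrix per GMR point.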
for i in range(m):
gr_sigma.append(gr_sig[i*3:i*3+3])
b_sigma.append(b_sig[i*3:i*3+3])
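    # For each axis, shade a +/-3 sigma band around the expected curve, taken from the matrix square root of the covariance.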
for i in range(m):
sigma = 3.*linalg.sqrtm(gr_sigma[i])
maximum[i] = gr_points[0,i]+ sigma[0,0];
minimum[i] = gr_points[0,i]- sigma[0,0];
fig2 = plt.figure()
import matplotlib.pyplot as plt
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, gr_points[0])
plt.savefig(name_model+ "_gravity_x_axis.png")
for i in range(m):
sigma = 3.*linalg.sqrtm(gr_sigma[i])
maximum[i] = gr_points[1,i]+ sigma[1,1];
minimum[i] = gr_points[1,i]- sigma[1,1];
fig3 = plt.figure()
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, gr_points[1])
plt.savefig(name_model+ "_gravity_y_axis.png")
for i in range(m):
sigma = 3.*linalg.sqrtm(gr_sigma[i])
maximum[i] = gr_points[2,i]+ sigma[2,2];
minimum[i] = gr_points[2,i]- sigma[2,2];
fig3 = plt.figure()
import matplotlib.pyplot as plt
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, gr_points[2])
plt.savefig(name_model+ "_gravity_z_axis.png")
for i in range(m):
sigma = 3.*linalg.sqrtm(b_sigma[i])
maximum[i] = b_points[0,i]+ sigma[0,0];
minimum[i] = b_points[0,i]- sigma[0,0];
fig4 = plt.figure()
import matplotlib.pyplot as plt
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, b_points[0])
plt.savefig(name_model+ "_body_x_axis.png")
for i in range(m):
sigma = 3.*linalg.sqrtm(b_sigma[i])
maximum[i] = b_points[1,i]+ sigma[1,1];
minimum[i] = b_points[1,i]- sigma[1,1];
fig5 = plt.figure()
import matplotlib.pyplot as plt
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, b_points[1])
plt.savefig(name_model+ "_body_axis.png")
for i in range(m):
sigma = 3.*linalg.sqrtm(b_sigma[i])
maximum[i] = b_points[2,i]+ sigma[2,2];
minimum[i] = b_points[2,i]- sigma[2,2];
fig6 = plt.figure()
import matplotlib.pyplot as plt
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, b_points[2])
plt.savefig(name_model+ "_body_z_axis.png")
#NOTE: Add path
def newModel(name,files):
g = Creator()
#Read the data
g.ReadFiles(files,[])
g.CreateDatasets_Acc()
g.ObtainNumberOfCluster()
gravity = g.gravity
K_gravity = g.K_gravity
body = g.body
K_body = g.K_body
# 2) define the number of points to be used in GMR
# (current settings allow for CONSTANT SPACING only)
numPoints = amax(gravity[0,:]);
scaling_factor = 10/10;
numGMRPoints = math.ceil(numPoints*scaling_factor);
# 3) perform Gaussian Mixture Modelling and Regression to retrieve the
# expected curve and associated covariance matrices for each feature
gr_points, gr_sigma = g.GetExpected(gravity,K_gravity,numGMRPoints)
b_points, b_sigma = g.GetExpected(body,K_body,numGMRPoints)
savetxt(name+"MuGravity.txt", gr_points,fmt='%.12f')
savetxt(name+"SigmaGravity.txt", gr_sigma,fmt='%.12f')
savetxt(name+"MuBody.txt", b_points,fmt='%.12f')
savetxt(name+"SigmaBody.txt", b_sigma,fmt='%.12f')
def loadModel(file_name, th=1, plot=True):
#Load files
gr_points = loadtxt(file_name+"MuGravity.txt")
gr_sigma = loadtxt(file_name+"SigmaGravity.txt")
b_points = loadtxt(file_name+"MuBody.txt")
b_sigma = loadtxt(file_name+"SigmaBody.txt")
#Add model
gm = GestureModel()
gm.addModel("gravity",gr_points, gr_sigma,th)
gm.addModel("body",b_points, b_sigma,th)
if plot == True:
plotResults(gr_points,gr_sigma, b_points,b_sigma,file_name)
return gm
name_models = ['A','B','S1','S2']
num_samples = [10,14,9,10]
th = [25,20,10,65]
create_models = False
list_files = []
#Create a list of the list of files for each model
print "Defining files"
i = 0
for name in name_models:
files = []
for k in range(1,num_samples[i]+1):
files.append('Models/' + name + '/data/mod('+ str(k) + ').txt')
list_files.append(files)
i = i + 1
#Create the models and save the list of files used to calculate the weights
if(create_models == True):
print "Creating models"
i = 0
for model in name_models:
print list_files[i]
newModel(model,list_files[i])
i = i + 1
list_models = []
print "Loading models"
#Load the models
for j in range(len(name_models)):
    #For the moment don't pass True if there are more than 2 models in Ubuntu
gm = loadModel(name_models[j],th[j],False)
list_models.append(gm)
print "Calculating weigths"
#Used to calculate the weights
v0 = Classifier()
for j in range(len(name_models)):
print "\nFor model " + name_models[j] + ":"
w_g, w_b = v0.calculateW(list_files[j],list_models[j])
list_models[j].addWeight("gravity",w_g)
list_models[j].addWeight("body",w_b)
print "\n Init classifers"
l_class = []
for j in range(len(name_models)):
l_class.append(Classifier())
print "Give the model to each classifier"
for j in range(len(name_models)):
l_class[j].classify(list_models[j])
print "Validation"
sfile = "validation/mix3.txt"
import matplotlib.pyplot as plt
fig = plt.figure()
for j in range(len(name_models)):
poss = l_class[j].validate_from_file(sfile, ',')
m,n = poss.shape
x = arange(0,m,1)
plt.plot(x, poss,'o',label= name_models[j])
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
plt.savefig("result.png")
print "Finish ..."
| gpl-3.0 | -8,411,053,304,711,118,000 | 25.278481 | 77 | 0.617213 | false |
jendrikseipp/rednotebook-elementary | rednotebook/util/markup.py | 1 | 16346 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Copyright (c) 2009 Jendrik Seipp
#
# RedNotebook is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RedNotebook is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with RedNotebook; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------
import logging
import os
import re
import sys
from gi.repository import GObject
from gi.repository import Pango
from rednotebook.external import txt2tags
from rednotebook.data import HASHTAG
from rednotebook.util import filesystem
# Linebreaks are only allowed at line ends
REGEX_LINEBREAK = r'\\\\[\s]*$'
REGEX_HTML_LINK = r'<a.*?>(.*?)</a>'
# pic [""/home/user/Desktop/RedNotebook pic"".png]
PIC_NAME = r'\S.*?\S|\S'
PIC_EXT = r'(?:png|jpe?g|gif|eps|bmp|svg)'
REGEX_PIC = re.compile(r'(\["")(%s)("")(\.%s)(\?\d+)?(\])' % (PIC_NAME, PIC_EXT), flags=re.I)
# named local link [my file.txt ""file:///home/user/my file.txt""]
# named link in web [heise ""http://heise.de""]
REGEX_NAMED_LINK = re.compile(r'(\[)(.*?)(\s"")(\S.*?\S)(""\])', flags=re.I)
ESCAPE_COLOR = r'XBEGINCOLORX\1XSEPARATORX\2XENDCOLORX'
COLOR_ESCAPED = r'XBEGINCOLORX(.*?)XSEPARATORX(.*?)XENDCOLORX'
TABLE_HEAD_BG = '#aaa'
CHARSET_UTF8 = '<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />'
PRINT_FUNCTION = '<script></script>'
CSS = """\
<style type="text/css">
body {
font-family: %(font)s;
}
<!-- Don't split last line between pages.
This fix is only supported by Opera -->
p {
page-break-inside: avoid;
}
blockquote {
margin: 1em 2em;
border-left: 2px solid #999;
font-style: oblique;
padding-left: 1em;
}
blockquote:first-letter {
margin: .2em .1em .1em 0;
font-size: 160%%;
font-weight: bold;
}
blockquote:first-line {
font-weight: bold;
}
table {
border-collapse: collapse;
}
td, th {
<!--border: 1px solid #888;--> <!--Allow tables without borders-->
padding: 3px 7px 2px 7px;
}
th {
text-align: left;
padding-top: 5px;
padding-bottom: 4px;
background-color: %(table_head_bg)s;
color: #ffffff;
}
hr.heavy {
height: 2px;
background-color: black;
}
</style>
"""
# MathJax
FORMULAS_SUPPORTED = True
MATHJAX_FILE = 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js'
# Explicitly setting inlineMath: [ ['\\(','\\)'] ] doesn't work.
# Using defaults:
# displayMath: [ ['$$','$$'], ['\[','\]'] ]
# inlineMath: [['\(','\)']]
MATHJAX_DELIMITERS = ['$$', '\\(', '\\)', r'\\[', '\\]']
MATHJAX = """\
<script type="text/x-mathjax-config">
MathJax.Hub.Config({{
messageStyle: "none",
config: ["MMLorHTML.js"],
jax: ["input/TeX","input/MathML","output/HTML-CSS","output/NativeMML"],
tex2jax: {{}},
extensions: ["tex2jax.js","mml2jax.js","MathMenu.js","MathZoom.js"],
TeX: {{
extensions: ["AMSmath.js","AMSsymbols.js","noErrors.js","noUndefined.js"]
}}
}});
</script>
<script type="text/javascript" src="{MATHJAX_FILE}"></script>
""".format(**locals())
def convert_categories_to_markup(categories, with_category_title=True):
# Only add Category title if the text is displayed
if with_category_title:
markup = '== %s ==\n' % _('Tags')
else:
markup = ''
for category, entry_list in categories.items():
markup += '- ' + category + '\n'
for entry in entry_list:
markup += ' - ' + entry + '\n'
markup += '\n\n'
return markup
def get_markup_for_day(day, with_text=True, with_tags=True, categories=None, date=None):
'''
Used for exporting days
'''
export_string = ''
# Add date if it is not None and not the empty string
if date:
export_string += '= %s =\n\n' % date
# Add text
if with_text:
export_string += day.text
# Add Categories
category_content_pairs = day.get_category_content_pairs()
if with_tags and categories:
categories = [word.lower() for word in categories]
export_categories = dict((x, y) for (x, y) in category_content_pairs.items()
if x.lower() in categories)
elif with_tags and categories is None:
# No restrictions
export_categories = category_content_pairs
else:
# "Export no categories" selected
export_categories = []
if export_categories:
export_string += '\n\n\n' + convert_categories_to_markup(
export_categories, with_category_title=with_text)
elif with_text:
export_string += '\n\n'
# Only return the string, when there is text or there are categories
# We don't want to list empty dates
if export_categories or with_text:
export_string += '\n\n\n'
return export_string
return ''
def _get_config(target, options):
config = {}
# Set the configuration on the 'config' dict.
config = txt2tags.ConfigMaster()._get_defaults()
config['outfile'] = txt2tags.MODULEOUT # results as list
config['target'] = target
# The Pre (and Post) processing config is a list of lists:
# [ [this, that], [foo, bar], [patt, replace] ]
config['postproc'] = []
config['preproc'] = []
config['style'] = []
# Allow line breaks, r'\\\\' are 2 \ for regexes
config['preproc'].append([REGEX_LINEBREAK, 'LINEBREAK'])
# Highlight hashtags.
if target == 'tex':
config['preproc'].append([HASHTAG.pattern, r'\1{\2\3BEGININDEX\3ENDINDEX|color:red}'])
else:
config['preproc'].append([HASHTAG.pattern, r'\1{\2\3|color:red}'])
# Escape color markup.
config['preproc'].append([r'\{(.*?)\|color:(.+?)\}', ESCAPE_COLOR])
if target in ['xhtml', 'html']:
config['encoding'] = 'UTF-8' # document encoding
config['toc'] = 0
config['css-sugar'] = 1
# Fix encoding for export opened in firefox
config['postproc'].append([r'<head>', '<head>' + CHARSET_UTF8])
# Line breaks
config['postproc'].append([r'LINEBREAK', '<br />'])
# Apply image resizing
config['postproc'].append([r'src=\"WIDTH(\d+)-', r'width="\1" src="'])
# {{red text|color:red}} -> <span style="color:red">red text</span>
config['postproc'].append([COLOR_ESCAPED, r'<span style="color:\2">\1</span>'])
elif target == 'tex':
config['encoding'] = 'utf8'
config['preproc'].append(['€', 'Euro'])
# Latex only allows whitespace and underscores in filenames if
# the filename is surrounded by "...". This is in turn only possible
# if the extension is omitted.
config['preproc'].append([r'\[""', r'["""'])
config['preproc'].append([r'""\.', r'""".'])
scheme = 'file:///' if sys.platform == 'win32' else 'file://'
# For images we have to omit the file:// prefix
config['postproc'].append([r'includegraphics\{(.*)"%s' % scheme, r'includegraphics{"\1'])
# Special handling for LOCAL file links (Omit scheme, add run:)
# \htmladdnormallink{file.txt}{file:///home/user/file.txt}
# -->
# \htmladdnormallink{file.txt}{run:/home/user/file.txt}
config['postproc'].append([r'htmladdnormallink\{(.*)\}\{%s(.*)\}' % scheme,
r'htmladdnormallink{\1}{run:\2}'])
# Line breaks
config['postproc'].append([r'LINEBREAK', r'\\\\'])
# Apply image resizing
config['postproc'].append([r'includegraphics\{("?)WIDTH(\d+)-', r'includegraphics[width=\2px]{\1'])
# We want the plain latex formulas unescaped.
# Allowed formulas: $$...$$, \[...\], \(...\)
config['preproc'].append([r'\\\[\s*(.+?)\s*\\\]', r"BEGINEQUATION''\1''ENDEQUATION"])
config['preproc'].append([r'\$\$\s*(.+?)\s*\$\$', r"BEGINEQUATION''\1''ENDEQUATION"])
config['postproc'].append([r'BEGINEQUATION(.+)ENDEQUATION', r'$$\1$$'])
config['preproc'].append([r'\\\(\s*(.+?)\s*\\\)', r"BEGINMATH''\1''ENDMATH"])
config['postproc'].append([r'BEGINMATH(.+)ENDMATH', r'$\1$'])
# Fix utf8 quotations - „, “ and ” cause problems compiling the latex document.
config['postproc'].extend([[u'„', '"'], [u'”', '"'], [u'“', '"']])
# Enable index.
config['style'].append('makeidx')
config['postproc'].append([r'BEGININDEX(.+?)ENDINDEX', r'\\index{\1}'])
config['postproc'].append(['begin{document}', 'makeindex\n\\\\begin{document}'])
config['postproc'].append(['end{document}', 'printindex\n\n\\\\end{document}'])
config['postproc'].append([COLOR_ESCAPED, r'\\textcolor{\2}{\1}'])
elif target == 'txt':
# Line breaks
config['postproc'].append([r'LINEBREAK', '\n'])
# Apply image resizing ([WIDTH400-file:///pathtoimage.jpg])
config['postproc'].append([r'\[WIDTH(\d+)-(.+)\]', r'[\2?\1]'])
# Allow resizing images by changing
# [filename.png?width] to [WIDTHwidth-filename.png]
img_ext = r'png|jpe?g|gif|eps|bmp|svg'
img_name = r'\S.*\S|\S'
# Apply this prepoc only after the latex image quotes have been added
config['preproc'].append([r'\[(%s\.(%s))\?(\d+)\]' % (img_name, img_ext), r'[WIDTH\3-\1]'])
# Disable colors for all other targets.
config['postproc'].append([COLOR_ESCAPED, r'\1'])
# MathJax
if options.pop('add_mathjax'):
config['postproc'].append([r'</body>', MATHJAX + '</body>'])
config['postproc'].append([r'</body>', PRINT_FUNCTION + '</body>'])
# Custom css
fonts = options.pop('font', 'sans-serif')
if 'html' in target:
css = CSS % {'font': fonts, 'table_head_bg': TABLE_HEAD_BG}
config['postproc'].append([r'</head>', css + '</head>'])
config.update(options)
return config
def _convert_paths(txt, data_dir):
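    # Rewrite relative picture/file references to absolute local URLs under data_dir, leaving remote URLs untouched.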
def _convert_uri(uri):
path = uri[len('file://'):] if uri.startswith('file://') else uri
# Check if relative file exists and convert it if it does.
if (not any(uri.startswith(proto) for proto in filesystem.REMOTE_PROTOCOLS) and
not os.path.isabs(path)):
path = os.path.join(data_dir, path)
assert os.path.isabs(path), path
if os.path.exists(path):
uri = filesystem.get_local_url(path)
return uri
def _convert_pic_path(match):
uri = _convert_uri(match.group(2) + match.group(4))
# Reassemble picture markup.
name, ext = os.path.splitext(uri)
parts = [match.group(1), name, match.group(3), ext]
if match.group(5) is not None:
parts.append(match.group(5))
parts.append(match.group(6))
return ''.join(parts)
def _convert_file_path(match):
uri = _convert_uri(match.group(4))
# Reassemble link markup
parts = [match.group(i) for i in range(1, 6)]
parts[3] = uri
return ''.join(parts)
txt = REGEX_PIC.sub(_convert_pic_path, txt)
txt = REGEX_NAMED_LINK.sub(_convert_file_path, txt)
return txt
def convert(txt, target, data_dir, headers=None, options=None):
'''
Code partly taken from txt2tags tarball
'''
options = options or {}
# Only add MathJax code if there is a formula.
options['add_mathjax'] = (
FORMULAS_SUPPORTED and
'html' in target and
any(x in txt for x in MATHJAX_DELIMITERS))
logging.debug('Add mathjax code: %s' % options['add_mathjax'])
# Turn relative paths into absolute paths.
txt = _convert_paths(txt, data_dir)
# The body text must be a list.
txt = txt.split('\n')
# Set the three header fields
if headers is None:
if target == 'tex':
# LaTeX requires a title if \maketitle is used
headers = ['RedNotebook', '', '']
else:
headers = ['', '', '']
config = _get_config(target, options)
# Let's do the conversion
try:
headers = txt2tags.doHeader(headers, config)
body, toc = txt2tags.convert(txt, config)
footer = txt2tags.doFooter(config)
toc = txt2tags.toc_tagger(toc, config)
toc = txt2tags.toc_formatter(toc, config)
full_doc = headers + toc + body + footer
finished = txt2tags.finish_him(full_doc, config)
result = '\n'.join(finished)
    # Txt2tags error, show the message to the user
except txt2tags.error as msg:
logging.error(msg)
result = msg
# Unknown error, show the traceback to the user
except:
result = (
'<b>Error</b>: This day contains invalid '
'<a href="http://txt2tags.org/markup.html">txt2tags markup</a>. '
'You can help us fix this by submitting a bugreport in the '
'<a href="https://code.google.com/p/txt2tags/issues/list">'
'txt2tags bugtracker</a>. Please append the day\'s text to the issue.')
logging.error('Invalid markup:\n%s' % txt2tags.getUnknownErrorMessage())
return result
def convert_to_pango(txt, headers=None, options=None):
'''
Code partly taken from txt2tags tarball
'''
original_txt = txt
# Here is the marked body text, it must be a list.
txt = txt.split('\n')
# Set the three header fields
if headers is None:
headers = ['', '', '']
config = txt2tags.ConfigMaster()._get_defaults()
config['outfile'] = txt2tags.MODULEOUT # results as list
config['target'] = 'xhtml'
config['preproc'] = []
# We need to escape the ampersand here, otherwise "&" would become
# "&amp;"
config['preproc'].append([r'&', '&'])
# Allow line breaks
config['postproc'] = []
config['postproc'].append([REGEX_LINEBREAK, '\n'])
if options is not None:
config.update(options)
# Let's do the conversion
try:
body, toc = txt2tags.convert(txt, config)
full_doc = body
finished = txt2tags.finish_him(full_doc, config)
result = ''.join(finished)
    # Txt2tags error, show the message to the user
except txt2tags.error as msg:
logging.error(msg)
result = msg
# Unknown error, show the traceback to the user
except:
result = txt2tags.getUnknownErrorMessage()
logging.error(result)
# remove unwanted paragraphs
result = result.replace('<p>', '').replace('</p>', '')
logging.log(5, 'Converted "%s" text to "%s" txt2tags markup' %
(repr(original_txt), repr(result)))
# Remove unknown tags (<a>)
def replace_links(match):
"""Return the link name."""
return match.group(1)
result = re.sub(REGEX_HTML_LINK, replace_links, result)
try:
Pango.parse_markup(result, -1, "0")
# result is valid pango markup, return the markup.
return result
except GObject.GError:
# There are unknown tags in the markup, return the original text
logging.debug('There are unknown tags in the markup: %s' % result)
return original_txt
def convert_from_pango(pango_markup):
original_txt = pango_markup
replacements = dict((
('<b>', '**'), ('</b>', '**'),
('<i>', '//'), ('</i>', '//'),
('<s>', '--'), ('</s>', '--'),
('<u>', '__'), ('</u>', '__'),
('&', '&'),
('<', '<'), ('>', '>'),
('\n', r'\\'),
))
for orig, repl in replacements.items():
pango_markup = pango_markup.replace(orig, repl)
logging.log(5, 'Converted "%s" pango to "%s" txt2tags' %
(repr(original_txt), repr(pango_markup)))
return pango_markup
| gpl-2.0 | 8,180,963,156,431,701,000 | 32.743802 | 107 | 0.578802 | false |
SportySpice/Collections | src/file/File.py | 1 | 3600 | import xbmc
import xbmcvfs
import Folder
import urllib
import urlparse
NAME_QUERY = 'fileName'
FOLDER_NAME_QUERY = 'folderName'
FOLDER_PATH_QUERY = 'folderPath'
class File(object):
def __init__(self, name, folder):
self.name = name
self.folder = folder
self.path = folder.fullpath
self.fullpath = folder.fullpath + '/' + name
if '.' in name:
self.soleName, self.extension = name.split('.', 1)
else:
self.soleName = name
self.extension = None
self._pathTranslated = None
self._fullpathTranslated = None
def exists(self):
return xbmcvfs.exists(self.fullpath)
def delete(self):
xbmcvfs.delete(self.fullpath)
def deleteIfExists(self):
if self.exists():
self.delete()
def pathTranslated(self):
return self.folder.fullpathTranslated()
def fullpathTranslated(self):
if self._fullpathTranslated is None:
self._fullpathTranslated = xbmc.translatePath(self.fullpath)
return self._fullpathTranslated
def fileHandler(self, write=False):
if write:
permission = 'w'
else:
permission = 'r'
fullpath = self.fullpathTranslated()
return xbmcvfs.File(fullpath, permission)
def contents(self):
fh = self.fileHandler();
contents = fh.read()
fh.close()
return contents
def lines(self):
contents = self.contents()
return contents.split('\n')
def write(self, contentsStr):
fh = self.fileHandler(write=True)
fh.write(contentsStr)
fh.close()
def encodedQuery(self):
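        # Encode the file name and its folder into a URL query string; fromQuery() below reverses this.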
query = urllib.urlencode({NAME_QUERY: self.name,
FOLDER_NAME_QUERY: self.folder.name,
FOLDER_PATH_QUERY: self.folder.path
})
return query
def dumpObject(self, dumpObject):
import dill as pickle
with open(self.fullpathTranslated(), 'wb') as f:
pickle.dump(dumpObject, f)
def loadObject(self):
import dill as pickle
with open(self.fullpathTranslated(),'rb') as f:
loadedObject = pickle.load(f)
return loadedObject
def fromQuery(query):
parsedQuery = urlparse.parse_qs(query)
name = parsedQuery[NAME_QUERY][0]
folderName = parsedQuery[FOLDER_NAME_QUERY][0]
folderPath = parsedQuery[FOLDER_PATH_QUERY][0]
folder = Folder.Folder(folderName, folderPath)
newFile = File(name, folder)
return newFile
def fromFullpath(fullpath):
folderPath, folderName, fileName = fullpath.rsplit('/', 2)
folder = Folder.Folder(folderName, folderPath)
newFile = File(fileName, folder)
return newFile
def fromNameAndDir(fileName, dirPath):
folder = Folder.fromFullpath(dirPath)
newFile = File(fileName, folder)
return newFile
def fromInvalidNameAndDir(originalName, dirPath):
import utils
name = utils.createValidName(originalName)
return fromNameAndDir(name, dirPath)
def loadObjectFromFP(fullpath):
dumpFile = fromFullpath(fullpath)
return dumpFile.loadObject() | gpl-2.0 | 3,374,206,890,298,877,400 | 21.36646 | 72 | 0.556667 | false |
ZhangJun-GitHub/Cycle | dialogs.py | 1 | 20748 | #====================================================
# Cycle - calendar for women
# Distributed under GNU Public License
# Original author: Oleg S. Gints
# Maintainer: Matt Molyneaux ([email protected])
# Home page: http://moggers.co.uk/cgit/cycle.git/about
#===================================================
import os
import wx
import wx.html
import cPickle
import random
import base64
from cal_year import cycle , Val
from save_load import Load_Cycle, get_f_name, set_color_default
from set_dir import *
#---------------------------------------------------------------------------
class Settings_Dlg(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent, -1, _('Settings'), wx.DefaultPosition)
self.Centre(wx.BOTH)
#======================
box = wx.BoxSizer(wx.VERTICAL)
b1 = wx.StaticBoxSizer(wx.StaticBox(self, -1, _('Length of cycle')), wx.VERTICAL)
i = wx.NewId()
self.cb1 = wx.CheckBox(self, i, _(' by average'), style=wx.NO_BORDER)
b1.Add(self.cb1, 0, wx.ALL, 5)
self.Bind(wx.EVT_CHECKBOX, self.By_Average, id=i)
self.cb1.SetValue(cycle.by_average)
b2 = wx.BoxSizer(wx.HORIZONTAL)
i = wx.NewId()
self.sc = wx.SpinCtrl(self, i, "", size=wx.Size(50, -1))
self.sc.SetRange(21, 35)
self.sc.SetValue(cycle.period)
self.sc.Enable(not self.cb1.GetValue())
b2.Add(self.sc, 0)
b2.Add(wx.StaticText(self, -1, _(' days in cycle')), 0)
b1.Add(b2, 0, wx.ALL, 5)
box.Add(b1, 0, wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, 10)
#======================
self.rb = wx.RadioBox(self, -1, _('Display'),
choices = [_('fertile days'), _('none')],
majorDimension=1, style=wx.RA_SPECIFY_COLS)
box.Add(self.rb, 0, wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, 10)
self.rb.SetSelection(cycle.disp)
#======================
self.rb1 = wx.RadioBox(self, -1, _('First week day'),
choices=[_('monday'), _('sunday')],
majorDimension=1, style=wx.RA_SPECIFY_COLS)
box.Add(self.rb1, 0, wx.EXPAND | wx.ALL, 10)
self.rb1.SetSelection(cycle.first_week_day)
#======================
i = wx.NewId()
txt1 = _('Colours')
txt2 = _('Change password')
w1, h = self.GetTextExtent(txt1)
w2, h = self.GetTextExtent(txt2)
w = max(w1, w2)
box.Add(wx.Button(self, i, txt1, size=wx.Size(w+10, -1)), 0, wx.ALIGN_CENTER)
self.Bind(wx.EVT_BUTTON, self.OnColours, id=i)
#======================
i = wx.NewId()
box.Add(wx.Button(self, i, txt2, size=wx.Size(w + 10, -1)), 0, wx.TOP | wx.ALIGN_CENTER, 10)
self.Bind(wx.EVT_BUTTON, self.OnChangePasswd, id=i)
#======================
but_box = wx.BoxSizer(wx.HORIZONTAL)
i = wx.NewId()
but_box.Add(wx.Button(self, i, _('Ok')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnOk, id=i)
i = wx.NewId()
but_box.Add(wx.Button(self, i, _('Cancel')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=i)
box.Add(but_box, 0, wx.ALIGN_CENTER)
self.SetAutoLayout(True)
self.SetSizer(box)
box.Fit(self)
def By_Average(self, event):
if event.Checked():
self.sc.Enable(False)
else:
self.sc.Enable(True)
def OnOk(self, event):
if not 21 <= self.sc.GetValue() <= 35:
dlg = wx.MessageDialog(self, _('Period of cycle is invalid!'),
_('Error!'), wx.OK | wx.ICON_ERROR )
dlg.ShowModal()
dlg.Destroy()
return
cycle.period = self.sc.GetValue()
cycle.by_average = self.cb1.GetValue()
cycle.disp = self.rb.GetSelection()
cycle.first_week_day = self.rb1.GetSelection()
self.EndModal(wx.ID_OK)
def OnCancel(self, event):
self.EndModal(wx.ID_CANCEL)
def OnChangePasswd(self, event):
dlg = Ask_Passwd_Dlg(self)
dlg.ShowModal()
dlg.Destroy()
def OnColours(self, event):
dlg = Colours_Dlg(self)
dlg.ShowModal()
dlg.Destroy()
#---------------------------------------------------------------------------
class Ask_Passwd_Dlg(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent, -1, _('Password'))
#======================
box = wx.BoxSizer(wx.VERTICAL)
box.Add(wx.StaticText(self, -1, _('Enter your password')), 0,
wx.ALIGN_CENTER|wx.TOP|wx.LEFT|wx.RIGHT, 10)
self.pass1 = wx.TextCtrl(self, -1, "", wx.Point(10, 30),
size=(130, -1), style=wx.TE_PASSWORD)
box.Add(self.pass1, 0, wx.ALIGN_CENTER | wx.ALL, 10)
box.Add(wx.StaticText(self, -1, _('Once more...')), 0,
wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, 10)
self.pass2 = wx.TextCtrl(self, -1, "", wx.Point(10, 80),
size=(130, -1), style=wx.TE_PASSWORD)
box.Add(self.pass2, 0, wx.ALIGN_CENTER|wx.ALL, 10)
b1 = wx.BoxSizer(wx.HORIZONTAL)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Ok')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnOk, id=i)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Cancel')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=i)
self.pass1.SetFocus()
box.Add(b1, 0, wx.ALIGN_CENTER)
self.SetAutoLayout(True)
self.SetSizer(box)
box.Fit(self)
def OnOk(self, event):
err = ""
if self.pass1.GetValue() == "" or self.pass2.GetValue() == "":
            err = _('Password must not be EMPTY!')
if self.pass1.GetValue() != self.pass2.GetValue():
            err = _('Entered passwords don\'t match!')
if err != "":
dlg = wx.MessageDialog(self, err,
_('Error!'), wx.OK | wx.ICON_ERROR )
dlg.ShowModal()
dlg.Destroy()
return
cycle.passwd = self.pass1.GetValue()
self.EndModal(wx.ID_OK)
def OnCancel(self, event):
self.EndModal(wx.ID_CANCEL)
#---------------------------------------------------------------------------
def get_users():
#Get list of users
magic_str = 'UserName='
users = [] #array of (user, file) name
p, f_name = get_f_name()
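    # Three on-disk formats are supported: a pickled dict with a 'username' key, the older 'UserName='
    # prefix followed by a pickled name and '===', and the oldest format where the file name is the user name.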
if os.path.exists(p):
files = os.listdir(p)
for f in files:
fd = open(os.path.join(p, f), "rb")
try:
data = cPickle.loads(fd.read())
except (cPickle.UnpicklingError, ImportError, AttributeError, EOFError, IndexError):
fd.seek(0)
data = fd.read(len(magic_str))
if 'username' in data:
users.append((data['username'], f))
elif data == magic_str:
data = fd.read()
n = data.find("===") #find end string
if n is not -1:
users.append((cPickle.loads(data[:n]), f))
else: #old format
users.append((f, f))
users.sort()
return users
#---------------------------------------------------------------------------
class Login_Dlg(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent, -1, _('Login'))
self.name = ""
self.file = ""
box = wx.BoxSizer(wx.VERTICAL)
#Get list of users
self.users = get_users()
# p, f_name = get_f_name()
# if os.path.exists(p):
# users = os.listdir(p)
# else:
# users = [_('empty')]
# users.sort()
#======== List users ==============
i = wx.NewId()
self.il = wx.ImageList(16, 16, True)
bmp = wx.Bitmap(os.path.join(bitmaps_dir, 'smiles.bmp'), wx.BITMAP_TYPE_BMP)
mask = wx.Mask(bmp, wx.WHITE)
bmp.SetMask(mask)
idx1 = self.il.Add(bmp)
self.list = wx.ListCtrl(self, i, size = wx.Size(200, 200),
style=wx.LC_REPORT|wx.SUNKEN_BORDER|wx.LC_SINGLE_SEL)
self.list.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
self.list.InsertColumn(0, _('Your name'))
for k in range(len(self.users)):
self.list.InsertImageStringItem(k, self.users[k][0], idx1)
self.list.SetColumnWidth(0, 180)
self.list.SetItemState(0, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
self.name = self.users[0][0]
self.file = self.users[0][1]
self.list.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected, self.list)
self.list.Bind(wx.EVT_LIST_KEY_DOWN, self.OnKeyDown, self.list)
box.Add(self.list, 0, wx.ALL, 10)
#========= Add user =============
i = wx.NewId()
box.Add(wx.Button(self, i, _('Add user')), 0, wx.ALIGN_CENTER)
self.Bind(wx.EVT_BUTTON, self.OnAdd, id=i)
#========= Ok - Cancel =============
b1 = wx.BoxSizer(wx.HORIZONTAL)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Ok')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnOk, id=i)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Cancel')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=i)
box.Add(b1, 0, wx.ALIGN_CENTER)
self.SetAutoLayout(True)
self.SetSizer(box)
box.Fit(self)
self.list.SetFocus()
def OnItemSelected(self, event):
self.name = self.users[event.GetIndex()][0] #self.list.GetItemText(event.GetIndex())
self.file = self.users[event.GetIndex()][1]
def OnKeyDown(self, event):
if event.GetKeyCode() == ord(" ") or event.GetKeyCode() == wx.WXK_RETURN:
self.OnOk()
else:
event.Skip()
def OnAdd(self, event=None):
if ask_name(self):
self.EndModal(wx.ID_OK)
def OnOk(self, event=None):
        dlg = wx.TextEntryDialog(self, self.name + _(', enter your password:'), _('Password'), '',
style=wx.OK | wx.CANCEL | wx.TE_PASSWORD)
while dlg.ShowModal() == wx.ID_OK:
cycle.passwd = dlg.GetValue()
cycle.name = self.name
cycle.file = self.file
if Load_Cycle(cycle.name, cycle.passwd, cycle.file):
dlg.Destroy()
self.EndModal(wx.ID_OK)
return
else:
dlg2 = wx.MessageDialog(self, _('Password is invalid!'),
_('Error!'), wx.OK | wx.ICON_ERROR )
dlg2.ShowModal()
dlg2.Destroy()
dlg.Destroy()
def OnCancel(self, event):
self.EndModal(wx.ID_CANCEL)
#-------------------------------------------------------
def first_login():
#Get list of users
users = get_users()
if users != []:
return 'not_first' #user(s) already exists
if ask_name():
return 'first'
else:
return 'bad_login'
#-------------------------------------------------------
def get_new_file_name():
#create filename for user
while True:
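        # Pick a random 4-byte, URL-safe base64 encoded name and retry until it does not collide with an existing file.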
random_chars = "".join(chr(random.randint(0,255)) for i in xrange(4))
random_chars = base64.urlsafe_b64encode(random_chars)
p, random_chars = get_f_name(random_chars)
if not os.path.isfile(random_chars):
return random_chars
#-------------------------------------------------------
def ask_name(parent=None):
# nobody, it is first login
wx.MessageBox(
_("This program is not a reliable contraceptive method.\n"
"Neither does it help to prevent sexually transmitted diseases\n"
"like HIV/AIDS.\n\nIt is just an electronic means of keeping track\n"
"of some of your medical data and extracting some statistical\n"
"conclusions from them. You cannot consider this program as a\n"
"substitute for your gynecologist in any way."))
    dlg = wx.TextEntryDialog(parent, _('Enter your name:'), _('New user'), '',
style=wx.OK | wx.CANCEL)
while dlg.ShowModal() == wx.ID_OK:
name = dlg.GetValue()
if name != "":
users = get_users()
exists = False
for i in users:
if name == i[0]:
exists = True
break
if not exists:
d = Ask_Passwd_Dlg(parent)
if d.ShowModal() == wx.ID_OK:
cycle.file = get_new_file_name()
cycle.name = name
d.Destroy()
dlg.Destroy()
#self.EndModal(wx.ID_OK)
set_color_default()
return True
else:
d.Destroy()
continue
else:
err = name + _(' - already exists!')
else:
            err = _('Name must not be EMPTY')
d2 = wx.MessageDialog(dlg, err, _('Error!'), wx.OK | wx.ICON_ERROR)
d2.ShowModal()
d2.Destroy()
dlg.Destroy()
return False
#---------------------------------------------------------------------------
class Legend_Dlg(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent, -1, _('Legend'))
#======================
box = wx.BoxSizer(wx.VERTICAL)
self._add(box, _('today'), wx.NullColour, wx.SIMPLE_BORDER)
self._add(box, _('begin of cycle'), cycle.colour_set['begin'])
self._add(box, _('prognosis of cycle begin'), cycle.colour_set['prog begin'])
self._add(box, _('conception'), cycle.colour_set['conception'])
self._add(box, _('fertile'), cycle.colour_set['fertile'])
self._add(box, _('ovulation, birth'), cycle.colour_set['ovule'])
self._add(box, _('1-st tablet'), cycle.colour_set['1-st tablet'])
self._add(box, _('tablets no. 22-28 or pause'), cycle.colour_set['pause'])
self._add(box, _('next 1-st tablet'), cycle.colour_set['next 1-st tablet'])
i = wx.NewId()
box.Add(wx.Button(self, i, _('Ok')), 0, wx.ALIGN_CENTER|wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnOk, id=i)
self.SetAutoLayout(True)
self.SetSizer(box)
box.Fit(self)
def _add(self, box, txt, col, st=0):
b = wx.BoxSizer(wx.HORIZONTAL)
w = wx.Window(self, -1, size=wx.Size(15, 15), style=st)
w.SetBackgroundColour(col)
b.Add(w, 0, wx.LEFT|wx.RIGHT, 10)
b.Add(wx.StaticText(self, -1, txt), 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 10)
box.Add(b, 0, wx.TOP, 10)
def OnOk(self, event):
self.EndModal(wx.ID_OK)
#---------------------------------------------------------------------------
class Note_Dlg(wx.Dialog):
def __init__(self, parent, title="", txt=""):
wx.Dialog.__init__(self, parent, -1, title)
self.CentreOnParent(wx.BOTH)
#======================
box = wx.BoxSizer(wx.VERTICAL)
self.txt = wx.TextCtrl(self, -1, txt,
size=(-1, 100), style=wx.TE_MULTILINE)
box.Add( self.txt, 0,
wx.EXPAND|wx.ALIGN_CENTER|wx.TOP|wx.LEFT|wx.RIGHT, 10)
b1 = wx.BoxSizer(wx.HORIZONTAL)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Ok')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnOk, id=i)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Cancel')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=i)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Remove')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnRemove, id=i)
box.Add(b1, 0, wx.ALIGN_CENTER)
self.SetAutoLayout(True)
self.SetSizer(box)
box.Fit(self)
self.txt.SetFocus()
def OnOk(self, event):
self.EndModal(wx.ID_OK)
def OnCancel(self, event):
self.EndModal(wx.ID_CANCEL)
def OnRemove(self, event):
self.EndModal(False)
def Get_Txt(self):
return self.txt.GetValue()
#---------------------------------------------------------------------------
class MyHtmlWindow(wx.html.HtmlWindow):
def __init__(self, parent, id, pos = wx.DefaultPosition, size=wx.DefaultSize):
wx.html.HtmlWindow.__init__(self, parent, id, pos, size)
if "gtk2" in wx.PlatformInfo:
self.SetStandardFonts()
def OnLinkClicked(self, linkinfo):
pass
#---------------------------------------------------------------------------
class Help_Dlg(wx.Dialog):
def __init__(self, parent, title="", txt=""):
wx.Dialog.__init__(self, parent, -1, title)
self.CentreOnParent(wx.BOTH)
#======================
box = wx.BoxSizer(wx.VERTICAL)
self.html = MyHtmlWindow(self, -1, size=(500, 350))
self.html.SetPage(txt)
box.Add(self.html, 0, wx.ALIGN_CENTER|wx.TOP|wx.LEFT|wx.RIGHT, 10)
i = wx.NewId()
box.Add(wx.Button(self, i, _('Ok')), 0, wx.ALIGN_CENTER|wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnOk, id=i)
self.SetAutoLayout(True)
self.SetSizer(box)
box.Fit(self)
def OnOk(self, event):
self.EndModal(wx.ID_OK)
#---------------------------------------------------------------------------
class Colours_Dlg(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent, -1, _('Colours settings'))
self.col_set = cycle.colour_set.copy()
self.col_id = cycle.colour_set.keys()
self.data = wx.ColourData()
self.data.SetChooseFull(True)
self.buttons = {}
#======================
box = wx.BoxSizer(wx.VERTICAL)
self._add(box, _('begin of cycle'), 'begin')
self._add(box, _('prognosis of cycle begin'), 'prog begin')
self._add(box, _('conception'), 'conception')
self._add(box, _('fertile'), 'fertile')
self._add(box, _('ovulation, birth'), 'ovule')
self._add(box, _('1-st tablet'), '1-st tablet')
self._add(box, _('tablets no. 22-28 or pause'), 'pause')
self._add(box, _('next 1-st tablet'), 'next 1-st tablet')
b1 = wx.BoxSizer(wx.HORIZONTAL)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Ok')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnOk, id=i)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('By default')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnDefault, id=i)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Cancel')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=i)
box.Add(b1, 0, wx.ALIGN_CENTER)
self.SetAutoLayout(True)
self.SetSizer(box)
box.Fit(self)
def _add(self, box, txt, col):
b = wx.BoxSizer(wx.HORIZONTAL)
i = self.col_id.index(col)
bt = wx.Button(self, i, "", size=wx.Size(15, 15))
self.Bind(wx.EVT_BUTTON, self.get_colour, id=i)
bt.SetBackgroundColour(self.col_set[col])
self.buttons.update({i:bt})
b.Add(bt, 0, wx.LEFT|wx.RIGHT, 10)
b.Add(wx.StaticText(self, -1, txt), 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 10)
box.Add(b, 0, wx.TOP, 10)
def get_colour(self, event):
c = self.col_set[ self.col_id[event.GetId()] ]
self.data.SetColour(c)
dlg = wx.ColourDialog(self, self.data)
if dlg.ShowModal() == wx.ID_OK:
self.data = dlg.GetColourData()
c = self.data.GetColour()
self.buttons[event.GetId()].SetBackgroundColour(c)
self.col_set[self.col_id[event.GetId()]] = c
def OnOk(self, event):
cycle.colour_set = self.col_set.copy()
Val.Cal.Draw_Mark()
self.EndModal(wx.ID_OK)
def OnDefault(self, event):
self.col_set = {'begin':wx.NamedColour('RED'),
'prog begin':wx.NamedColour('PINK'),
'conception':wx.NamedColour('MAGENTA'),
'fertile':wx.NamedColour('GREEN YELLOW'),
'ovule':wx.NamedColour('SPRING GREEN'),
'1-st tablet':wx.NamedColour('GOLD'),
'pause':wx.NamedColour('LIGHT BLUE'),
'next 1-st tablet':wx.NamedColour('PINK')}
for item in self.col_id:
self.buttons[self.col_id.index(item)].SetBackgroundColour(self.col_set[item])
def OnCancel(self, event):
self.EndModal(wx.ID_CANCEL)
#---------------------------------------------------------------------------
| gpl-2.0 | 7,211,397,491,379,076,000 | 35.852575 | 100 | 0.502699 | false |
rolobio/sshm | sshm/main.py | 1 | 5215 | #! /usr/bin/env python3
"""
This module allows the console to use SSHM's functionality.
This module should only be run by the console!
"""
from __future__ import print_function
import sys
try: # pragma: no cover version specific
from lib import sshm
except ImportError: # pragma: no cover version specific
from sshm.lib import sshm
__all__ = ['main']
def get_argparse_args(args=None):
"""
Get the arguments passed to this script when it was run.
@param args: A list of arguments passed in the console.
@type args: list
@returns: A tuple containing (args, command, extra_args)
@rtype: tuple
"""
try: # pragma: no cover
from _info import __version__, __long_description__
except ImportError: # pragma: no cover
from sshm._info import __version__, __long_description__
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__long_description__)
parser.add_argument('servers', nargs='+')
parser.add_argument('command')
parser.add_argument('-s', '--sorted-output', action='store_true', default=False,
help='Sort the output by the URI of each instance. This will wait for all instances to finish before showing any output!')
parser.add_argument('-p', '--strip-whitespace', action='store_true', default=False,
help='Remove any whitespace surrounding the output of each instance.')
parser.add_argument('-d', '--disable-formatting', action='store_true', default=False,
help='Disable command formatting.')
parser.add_argument('-u', '--quiet', action='store_true', default=False,
help="Hide SSHM's server information on output (this implies sorted).")
parser.add_argument('-w', '--workers', type=int, default=20,
help="Limit the amount of concurrent SSH connections.")
parser.add_argument('--version', action='version', version='%(prog)s '+__version__)
args, extra_args = parser.parse_known_args(args=args)
# Move any servers that start with a - to extra_args
new_servers = []
for i in args.servers:
if i.startswith('-'):
extra_args.append(i)
else:
new_servers.append(i)
args.servers = new_servers
# If the comand starts with a -, replace it with the last server and
# move the command to extra_args.
if args.command.startswith('-'):
extra_args.append(args.command)
args.command = args.servers.pop(-1)
if args.quiet:
args.sorted_output = True
return (args, args.command, extra_args)
def _print_handling_newlines(uri, return_code, to_print, header='', strip_whitespace=False, quiet=False, file=sys.stdout):
"""
Print "to_print" to "file" with the formatting needed to represent it's data
properly.
"""
if strip_whitespace:
to_print = to_print.strip()
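    # Single-line output is printed on the same line as the header; multi-line output starts on a new line.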
if to_print.count('\n') == 0:
sep = ' '
else:
sep = '\n'
output_str = 'sshm: {header}{uri}({return_code}):{sep}{to_print}'
if quiet:
output_str = '{to_print}'
print(output_str.format(header=header,
uri=uri,
return_code=return_code,
sep=sep,
to_print=to_print), file=file)
def main():
"""
Run SSHM using console provided arguments.
This should only be run using a console!
"""
import select
args, command, extra_arguments = get_argparse_args()
    # Only provide stdin if there is data
r_list, i, i = select.select([sys.stdin], [], [], 0)
if r_list:
stdin = r_list[0]
else:
stdin = None
# Perform the command on each server, print the results to stdout.
results = sshm(args.servers, command, extra_arguments, stdin, args.disable_formatting, args.workers)
# If a sorted output is requested, gather all results before output.
if args.sorted_output:
results = list(results)
results = sorted(results, key=lambda x: x['uri'])
exit_code = 0
for result in results:
exit_code = exit_code or result.get('return_code')
if result.get('stdout') != None:
_print_handling_newlines(result['uri'],
result['return_code'],
result['stdout'],
strip_whitespace=args.strip_whitespace,
quiet=args.quiet,
)
if result.get('stderr'):
_print_handling_newlines(result['uri'],
result.get('return_code', ''),
result['stderr'],
'Error: ',
strip_whitespace=args.strip_whitespace,
quiet=args.quiet,
file=sys.stderr,
)
if result.get('traceback'):
_print_handling_newlines(result['uri'],
result['traceback'],
'Traceback: ',
strip_whitespace=args.strip_whitespace,
quiet=args.quiet,
file=sys.stderr,
)
# Exit with non-zero when there is a failure
sys.exit(exit_code)
if __name__ == '__main__':
main()
| gpl-2.0 | 6,128,735,719,583,437,000 | 33.536424 | 135 | 0.595781 | false |
jmathai/elodie | elodie/tests/config_test.py | 1 | 3912 | from __future__ import absolute_import
# Project imports
import os
import sys
import unittest
from mock import patch
from tempfile import gettempdir
sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))))
from elodie import constants
from elodie.config import load_config, load_plugin_config
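# load_config() caches the parsed configuration on the function object, so each test clears that attribute before and after use.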
@patch('elodie.config.config_file', '%s/config.ini-singleton-success' % gettempdir())
def test_load_config_singleton_success():
with open('%s/config.ini-singleton-success' % gettempdir(), 'w') as f:
f.write("""
[MapQuest]
key=your-api-key-goes-here
prefer_english_names=False
""")
if hasattr(load_config, 'config'):
del load_config.config
config = load_config()
assert config['MapQuest']['key'] == 'your-api-key-goes-here', config.get('MapQuest', 'key')
config.set('MapQuest', 'key', 'new-value')
config = load_config()
if hasattr(load_config, 'config'):
del load_config.config
assert config['MapQuest']['key'] == 'new-value', config.get('MapQuest', 'key')
@patch('elodie.config.config_file', '%s/config.ini-does-not-exist' % gettempdir())
def test_load_config_singleton_no_file():
if hasattr(load_config, 'config'):
del load_config.config
config = load_config()
if hasattr(load_config, 'config'):
del load_config.config
assert config == {}, config
@patch('elodie.config.config_file', '%s/config.ini-load-plugin-config-unset-backwards-compat' % gettempdir())
def test_load_plugin_config_unset_backwards_compat():
with open('%s/config.ini-load-plugin-config-unset-backwards-compat' % gettempdir(), 'w') as f:
f.write("""
""")
if hasattr(load_config, 'config'):
del load_config.config
plugins = load_plugin_config()
if hasattr(load_config, 'config'):
del load_config.config
assert plugins == [], plugins
@patch('elodie.config.config_file', '%s/config.ini-load-plugin-config-exists-not-set' % gettempdir())
def test_load_plugin_config_exists_not_set():
with open('%s/config.ini-load-plugin-config-exists-not-set' % gettempdir(), 'w') as f:
f.write("""
[Plugins]
""")
if hasattr(load_config, 'config'):
del load_config.config
plugins = load_plugin_config()
if hasattr(load_config, 'config'):
del load_config.config
assert plugins == [], plugins
@patch('elodie.config.config_file', '%s/config.ini-load-plugin-config-one' % gettempdir())
def test_load_plugin_config_one():
with open('%s/config.ini-load-plugin-config-one' % gettempdir(), 'w') as f:
f.write("""
[Plugins]
plugins=Dummy
""")
if hasattr(load_config, 'config'):
del load_config.config
plugins = load_plugin_config()
if hasattr(load_config, 'config'):
del load_config.config
assert plugins == ['Dummy'], plugins
@patch('elodie.config.config_file', '%s/config.ini-load-plugin-config-one-with-invalid' % gettempdir())
def test_load_plugin_config_one_with_invalid():
    with open('%s/config.ini-load-plugin-config-one-with-invalid' % gettempdir(), 'w') as f:
f.write("""
[Plugins]
plugins=DNE
""")
if hasattr(load_config, 'config'):
del load_config.config
plugins = load_plugin_config()
if hasattr(load_config, 'config'):
del load_config.config
assert plugins == [], plugins
@patch('elodie.config.config_file', '%s/config.ini-load-plugin-config-many' % gettempdir())
def test_load_plugin_config_many():
with open('%s/config.ini-load-plugin-config-many' % gettempdir(), 'w') as f:
f.write("""
[Plugins]
plugins=GooglePhotos,Dummy
""")
if hasattr(load_config, 'config'):
del load_config.config
plugins = load_plugin_config()
if hasattr(load_config, 'config'):
del load_config.config
assert plugins == ['GooglePhotos','Dummy'], plugins
| apache-2.0 | 2,237,664,676,632,507,000 | 28.862595 | 114 | 0.652607 | false |
pthatcher/psync | src/history/__init__.py | 1 | 1996 | # Copyright (c) 2011, Peter Thatcher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Things in this module represent anything related to the history of
# files, but unrelated to a specific store of the files (such as the
# file system). It also contains the log for comparing histories and
# determining what kinds of actions are necessary to merge histories.
from store import HistoryStore
from entry import History, HistoryEntry, group_history_by_gpath
from diff import HistoryDiff, HistoryDiffType, diff_histories
from diff import MergeAction, MergeActionType, calculate_merge_actions
from mergelog import MergeLog
| bsd-3-clause | -8,313,958,480,378,426,000 | 54.444444 | 80 | 0.784068 | false |
dparaujo/projeto | app_inscricoes/questionarios/migrations/0002_auto_20170220_2126.py | 1 | 1224 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-02-21 00:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('questionarios', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='tblquestionarioingresopsid',
name='pessoa',
),
migrations.RemoveField(
model_name='tblquestionariosocioeconomico',
name='pessoa',
),
migrations.AlterField(
model_name='tblquestionarioingresopsid',
name='fez_quantos_curso_psid',
field=models.IntegerField(choices=[(0, 'Nenhum Curso'), (1, 'Um Curso'), (3, 'Dois Cursos'), (4, 'Tr\xeas Cursos'), (5, 'Quatro Cursos'), (6, 'Fiz mais que quatro cursos')], verbose_name='Quantos curso voc\xea fez no PSID?'),
),
migrations.AlterField(
model_name='tblquestionariosocioeconomico',
name='cor_raca',
field=models.IntegerField(choices=[(0, 'Branca'), (1, 'Negro'), (3, 'Pardo'), (4, 'Amarela'), (5, 'Ind\xedgena'), (6, 'N\xe3o Declara')], verbose_name='Cor/Ra\xe7a'),
),
]
| gpl-3.0 | -3,572,128,225,930,525,000 | 36.090909 | 237 | 0.588235 | false |
crespyl/pcre2 | maint/MultiStage2.py | 1 | 23077 | #! /usr/bin/python
# Multistage table builder
# (c) Peter Kankowski, 2008
##############################################################################
# This script was submitted to the PCRE project by Peter Kankowski as part of
# the upgrading of Unicode property support. The new code speeds up property
# matching many times. The script is for the use of PCRE maintainers, to
# generate the pcre_ucd.c file that contains a digested form of the Unicode
# data tables.
#
# The script has now been upgraded to Python 3 for PCRE2, and should be run in
# the maint subdirectory, using the command
#
# [python3] ./MultiStage2.py >../src/pcre2_ucd.c
#
# It requires four Unicode data tables, DerivedGeneralCategory.txt,
# GraphemeBreakProperty.txt, Scripts.txt, and CaseFolding.txt, to be in the
# Unicode.tables subdirectory. The first of these is found in the "extracted"
# subdirectory of the Unicode database (UCD) on the Unicode web site; the
# second is in the "auxiliary" subdirectory; the other two are directly in the
# UCD directory.
#
# Minor modifications made to this script:
# Added #! line at start
# Removed tabs
# Made it work with Python 2.4 by rewriting two statements that needed 2.5
# Consequent code tidy
# Adjusted data file names to take from the Unicode.tables directory
# Adjusted global table names by prefixing _pcre_.
# Commented out stuff relating to the casefolding table, which isn't used;
# removed completely in 2012.
# Corrected size calculation
# Add #ifndef SUPPORT_UCP to use dummy tables when no UCP support is needed.
# Update for PCRE2: name changes, and SUPPORT_UCP is abolished.
#
# Major modifications made to this script:
# Added code to add a grapheme break property field to records.
#
# Added code to search for sets of more than two characters that must match
# each other caselessly. A new table is output containing these sets, and
# offsets into the table are added to the main output records. This new
# code scans CaseFolding.txt instead of UnicodeData.txt.
#
# Update for Python3:
# . Processed with 2to3, but that didn't fix everything
# . Changed string.strip to str.strip
# . Added encoding='utf-8' to the open() call
# . Inserted 'int' before blocksize/ELEMS_PER_LINE because an int is
# required and the result of the division is a float
#
# The main tables generated by this script are used by macros defined in
# pcre2_internal.h. They look up Unicode character properties using short
# sequences of code that contains no branches, which makes for greater speed.
#
# Conceptually, there is a table of records (of type ucd_record), containing a
# script number, character type, grapheme break type, offset to caseless
# matching set, and offset to the character's other case for every character.
# However, a real table covering all Unicode characters would be far too big.
# It can be efficiently compressed by observing that many characters have the
# same record, and many blocks of characters (taking 128 characters in a block)
# have the same set of records as other blocks. This leads to a 2-stage lookup
# process.
#
# This script constructs four tables. The ucd_caseless_sets table contains
# lists of characters that all match each other caselessly. Each list is
# in order, and is terminated by NOTACHAR (0xffffffff), which is larger than
# any valid character. The first list is empty; this is used for characters
# that are not part of any list.
#
# The ucd_records table contains one instance of every unique record that is
# required. The ucd_stage1 table is indexed by a character's block number, and
# yields what is in effect a "virtual" block number. The ucd_stage2 table is a
# table of "virtual" blocks; each block is indexed by the offset of a character
# within its own block, and the result is the offset of the required record.
#
# Example: lowercase "a" (U+0061) is in block 0
# lookup 0 in stage1 table yields 0
# lookup 97 in the first table in stage2 yields 16
# record 17 is { 33, 5, 11, 0, -32 }
# 33 = ucp_Latin => Latin script
# 5 = ucp_Ll => Lower case letter
# 11 = ucp_gbOther => Grapheme break property "Other"
# 0 => not part of a caseless set
# -32 => Other case is U+0041
#
# Almost all lowercase latin characters resolve to the same record. One or two
# are different because they are part of a multi-character caseless set (for
# example, k, K and the Kelvin symbol are such a set).
#
# Example: hiragana letter A (U+3042) is in block 96 (0x60)
# lookup 96 in stage1 table yields 88
# lookup 66 in the 88th table in stage2 yields 467
# record 470 is { 26, 7, 11, 0, 0 }
# 26 = ucp_Hiragana => Hiragana script
# 7 = ucp_Lo => Other letter
# 11 = ucp_gbOther => Grapheme break property "Other"
# 0 => not part of a caseless set
# 0 => No other case
#
# In these examples, no other blocks resolve to the same "virtual" block, as it
# happens, but plenty of other blocks do share "virtual" blocks.
#
# There is a fourth table, maintained by hand, which translates from the
# individual character types such as ucp_Cc to the general types like ucp_C.
#
# Philip Hazel, 03 July 2008
#
# 01-March-2010: Updated list of scripts for Unicode 5.2.0
# 30-April-2011: Updated list of scripts for Unicode 6.0.0
# July-2012: Updated list of scripts for Unicode 6.1.0
# 20-August-2012: Added scan of GraphemeBreakProperty.txt and added a new
# field in the record to hold the value. Luckily, the
# structure had a hole in it, so the resulting table is
# not much bigger than before.
# 18-September-2012: Added code for multiple caseless sets. This uses the
# final hole in the structure.
# 30-September-2012: Added RegionalIndicator break property from Unicode 6.2.0
# 13-May-2014: Updated for PCRE2
# 03-June-2014: Updated for Python 3
# 20-June-2014: Updated for Unicode 7.0.0
# 12-August-2014: Updated to put Unicode version into the file
##############################################################################
import re
import string
import sys
MAX_UNICODE = 0x110000
NOTACHAR = 0xffffffff
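# ---------------------------------------------------------------------------
# Illustrative sketch only (not used by this script or by PCRE2): given the
# stage1, stage2 and records tables produced below, a property record could be
# looked up for a code point roughly as follows. The real lookup is done by
# the GET_UCD macro in pcre2_internal.h; "records" is assumed here to be the
# emitted array of record tuples (as in the C output), not the dict used
# internally below, and block_size must match the block size chosen when the
# tables were built.
def example_ucd_lookup(stage1, stage2, records, c, block_size):
    virtual_block = int(stage1[c // block_size])   # "virtual" block number
    offset = c % block_size                        # offset within the block
    record_index = stage2[virtual_block * block_size + offset]
    return records[record_index]   # (script, type, break prop, caseset, other case)
# ---------------------------------------------------------------------------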
# Parse a line of Scripts.txt, GraphemeBreakProperty.txt or DerivedGeneralCategory.txt
def make_get_names(enum):
return lambda chardata: enum.index(chardata[1])
# Parse a line of CaseFolding.txt
def get_other_case(chardata):
if chardata[1] == 'C' or chardata[1] == 'S':
return int(chardata[2], 16) - int(chardata[0], 16)
return 0
# Read the whole table in memory, setting/checking the Unicode version
def read_table(file_name, get_value, default_value):
global unicode_version
f = re.match(r'^[^/]+/([^.]+)\.txt$', file_name)
file_base = f.group(1)
version_pat = r"^# " + re.escape(file_base) + r"-(\d+\.\d+\.\d+)\.txt$"
file = open(file_name, 'r', encoding='utf-8')
f = re.match(version_pat, file.readline())
version = f.group(1)
if unicode_version == "":
unicode_version = version
elif unicode_version != version:
print("WARNING: Unicode version differs in %s", file_name, file=sys.stderr)
table = [default_value] * MAX_UNICODE
for line in file:
line = re.sub(r'#.*', '', line)
chardata = list(map(str.strip, line.split(';')))
if len(chardata) <= 1:
continue
value = get_value(chardata)
m = re.match(r'([0-9a-fA-F]+)(\.\.([0-9a-fA-F]+))?$', chardata[0])
char = int(m.group(1), 16)
if m.group(3) is None:
last = char
else:
last = int(m.group(3), 16)
for i in range(char, last + 1):
# It is important not to overwrite a previously set
# value because in the CaseFolding file there are lines
# to be ignored (returning the default value of 0)
# which often come after a line which has already set
# data.
if table[i] == default_value:
table[i] = value
file.close()
return table
# Get the smallest possible C language type for the values
def get_type_size(table):
type_size = [("uint8_t", 1), ("uint16_t", 2), ("uint32_t", 4),
("signed char", 1), ("pcre_int16", 2), ("pcre_int32", 4)]
limits = [(0, 255), (0, 65535), (0, 4294967295),
(-128, 127), (-32768, 32767), (-2147483648, 2147483647)]
minval = min(table)
maxval = max(table)
for num, (minlimit, maxlimit) in enumerate(limits):
if minlimit <= minval and maxval <= maxlimit:
return type_size[num]
else:
raise OverflowError("Too large to fit into C types")
def get_tables_size(*tables):
total_size = 0
for table in tables:
type, size = get_type_size(table)
total_size += size * len(table)
return total_size
# Compress the table into the two stages
def compress_table(table, block_size):
blocks = {} # Dictionary for finding identical blocks
stage1 = [] # Stage 1 table contains block numbers (indices into stage 2 table)
stage2 = [] # Stage 2 table contains the blocks with property values
table = tuple(table)
for i in range(0, len(table), block_size):
block = table[i:i+block_size]
start = blocks.get(block)
if start is None:
# Allocate a new block
start = len(stage2) / block_size
stage2 += block
blocks[block] = start
stage1.append(start)
return stage1, stage2
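# For example (illustrative values only): compress_table([7]*192 + [9]*64, 64)
# would give a stage1 of [0, 0, 0, 1] (block numbers) and a stage2 holding just
# the two distinct 64-element blocks, because identical blocks are shared.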
# Print a table
def print_table(table, table_name, block_size = None):
type, size = get_type_size(table)
ELEMS_PER_LINE = 16
s = "const %s %s[] = { /* %d bytes" % (type, table_name, size * len(table))
if block_size:
s += ", block = %d" % block_size
print(s + " */")
table = tuple(table)
if block_size is None:
fmt = "%3d," * ELEMS_PER_LINE + " /* U+%04X */"
mult = MAX_UNICODE / len(table)
for i in range(0, len(table), ELEMS_PER_LINE):
            print(fmt % (table[i:i+ELEMS_PER_LINE] + (int(i * mult),)))
else:
if block_size > ELEMS_PER_LINE:
el = ELEMS_PER_LINE
else:
el = block_size
fmt = "%3d," * el + "\n"
if block_size > ELEMS_PER_LINE:
fmt = fmt * int(block_size / ELEMS_PER_LINE)
for i in range(0, len(table), block_size):
print(("/* block %d */\n" + fmt) % ((i / block_size,) + table[i:i+block_size]))
print("};\n")
# Extract the unique combinations of properties into records
def combine_tables(*tables):
records = {}
index = []
for t in zip(*tables):
i = records.get(t)
if i is None:
i = records[t] = len(records)
index.append(i)
return index, records
def get_record_size_struct(records):
size = 0
structure = '/* When recompiling tables with a new Unicode version, please check the\n' + \
'types in this structure definition from pcre2_internal.h (the actual\n' + \
'field names will be different):\n\ntypedef struct {\n'
for i in range(len(records[0])):
record_slice = [record[i] for record in records]
slice_type, slice_size = get_type_size(record_slice)
        # add padding: round up to the nearest multiple of slice_size
size = (size + slice_size - 1) & -slice_size
size += slice_size
structure += '%s property_%d;\n' % (slice_type, i)
# round up to the first item of the next structure in array
record_slice = [record[0] for record in records]
slice_type, slice_size = get_type_size(record_slice)
size = (size + slice_size - 1) & -slice_size
structure += '} ucd_record;\n*/\n\n'
return size, structure
def test_record_size():
tests = [ \
( [(3,), (6,), (6,), (1,)], 1 ), \
( [(300,), (600,), (600,), (100,)], 2 ), \
( [(25, 3), (6, 6), (34, 6), (68, 1)], 2 ), \
( [(300, 3), (6, 6), (340, 6), (690, 1)], 4 ), \
( [(3, 300), (6, 6), (6, 340), (1, 690)], 4 ), \
( [(300, 300), (6, 6), (6, 340), (1, 690)], 4 ), \
( [(3, 100000), (6, 6), (6, 123456), (1, 690)], 8 ), \
( [(100000, 300), (6, 6), (123456, 6), (1, 690)], 8 ), \
]
for test in tests:
size, struct = get_record_size_struct(test[0])
assert(size == test[1])
#print struct
def print_records(records, record_size):
print('const ucd_record PRIV(ucd_records)[] = { ' + \
'/* %d bytes, record size %d */' % (len(records) * record_size, record_size))
records = list(zip(list(records.keys()), list(records.values())))
records.sort(key = lambda x: x[1])
for i, record in enumerate(records):
print((' {' + '%6d, ' * len(record[0]) + '}, /* %3d */') % (record[0] + (i,)))
print('};\n')
script_names = ['Arabic', 'Armenian', 'Bengali', 'Bopomofo', 'Braille', 'Buginese', 'Buhid', 'Canadian_Aboriginal', \
'Cherokee', 'Common', 'Coptic', 'Cypriot', 'Cyrillic', 'Deseret', 'Devanagari', 'Ethiopic', 'Georgian', \
'Glagolitic', 'Gothic', 'Greek', 'Gujarati', 'Gurmukhi', 'Han', 'Hangul', 'Hanunoo', 'Hebrew', 'Hiragana', \
'Inherited', 'Kannada', 'Katakana', 'Kharoshthi', 'Khmer', 'Lao', 'Latin', 'Limbu', 'Linear_B', 'Malayalam', \
'Mongolian', 'Myanmar', 'New_Tai_Lue', 'Ogham', 'Old_Italic', 'Old_Persian', 'Oriya', 'Osmanya', 'Runic', \
'Shavian', 'Sinhala', 'Syloti_Nagri', 'Syriac', 'Tagalog', 'Tagbanwa', 'Tai_Le', 'Tamil', 'Telugu', 'Thaana', \
'Thai', 'Tibetan', 'Tifinagh', 'Ugaritic', 'Yi', \
# New for Unicode 5.0
'Balinese', 'Cuneiform', 'Nko', 'Phags_Pa', 'Phoenician', \
# New for Unicode 5.1
'Carian', 'Cham', 'Kayah_Li', 'Lepcha', 'Lycian', 'Lydian', 'Ol_Chiki', 'Rejang', 'Saurashtra', 'Sundanese', 'Vai', \
# New for Unicode 5.2
'Avestan', 'Bamum', 'Egyptian_Hieroglyphs', 'Imperial_Aramaic', \
'Inscriptional_Pahlavi', 'Inscriptional_Parthian', \
'Javanese', 'Kaithi', 'Lisu', 'Meetei_Mayek', \
'Old_South_Arabian', 'Old_Turkic', 'Samaritan', 'Tai_Tham', 'Tai_Viet', \
# New for Unicode 6.0.0
'Batak', 'Brahmi', 'Mandaic', \
# New for Unicode 6.1.0
'Chakma', 'Meroitic_Cursive', 'Meroitic_Hieroglyphs', 'Miao', 'Sharada', 'Sora_Sompeng', 'Takri',
# New for Unicode 7.0.0
'Bassa_Vah', 'Caucasian_Albanian', 'Duployan', 'Elbasan', 'Grantha', 'Khojki', 'Khudawadi',
'Linear_A', 'Mahajani', 'Manichaean', 'Mende_Kikakui', 'Modi', 'Mro', 'Nabataean',
'Old_North_Arabian', 'Old_Permic', 'Pahawh_Hmong', 'Palmyrene', 'Psalter_Pahlavi',
'Pau_Cin_Hau', 'Siddham', 'Tirhuta', 'Warang_Citi'
]
category_names = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu',
'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps',
'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs' ]
break_property_names = ['CR', 'LF', 'Control', 'Extend', 'Prepend',
'SpacingMark', 'L', 'V', 'T', 'LV', 'LVT', 'Regional_Indicator', 'Other' ]
test_record_size()
unicode_version = ""
script = read_table('Unicode.tables/Scripts.txt', make_get_names(script_names), script_names.index('Common'))
category = read_table('Unicode.tables/DerivedGeneralCategory.txt', make_get_names(category_names), category_names.index('Cn'))
break_props = read_table('Unicode.tables/GraphemeBreakProperty.txt', make_get_names(break_property_names), break_property_names.index('Other'))
other_case = read_table('Unicode.tables/CaseFolding.txt', get_other_case, 0)
# This block of code was added by PH in September 2012. I am not a Python
# programmer, so the style is probably dreadful, but it does the job. It scans
# the other_case table to find sets of more than two characters that must all
# match each other caselessly. Later in this script a table of these sets is
# written out. However, we have to do this work here in order to compute the
# offsets in the table that are inserted into the main table.
# The CaseFolding.txt file lists pairs, but the common logic for reading data
# sets only one value, so first we go through the table and set "return"
# offsets for those that are not already set.
for c in range(0x10ffff):
if other_case[c] != 0 and other_case[c + other_case[c]] == 0:
other_case[c + other_case[c]] = -other_case[c]
# Now scan again and create equivalence sets.
sets = []
for c in range(0x10ffff):
o = c + other_case[c]
# Trigger when this character's other case does not point back here. We
# now have three characters that are case-equivalent.
if other_case[o] != -other_case[c]:
t = o + other_case[o]
# Scan the existing sets to see if any of the three characters are already
# part of a set. If so, unite the existing set with the new set.
appended = 0
for s in sets:
found = 0
for x in s:
if x == c or x == o or x == t:
found = 1
# Add new characters to an existing set
if found:
found = 0
for y in [c, o, t]:
for x in s:
if x == y:
found = 1
if not found:
s.append(y)
appended = 1
# If we have not added to an existing set, create a new one.
if not appended:
sets.append([c, o, t])
# End of loop looking for caseless sets.
# Now scan the sets and set appropriate offsets for the characters.
caseless_offsets = [0] * MAX_UNICODE
offset = 1
for s in sets:
for x in s:
caseless_offsets[x] = offset
offset += len(s) + 1
# End of block of code for creating offsets for caseless matching sets.
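# For example, 'k' (U+006B), 'K' (U+004B) and KELVIN SIGN (U+212A) form one
# such set: all three end up with the same caseless_offsets value, which is
# the index of the first member of that set within ucd_caseless_sets.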
# Combine the tables
table, records = combine_tables(script, category, break_props,
caseless_offsets, other_case)
record_size, record_struct = get_record_size_struct(list(records.keys()))
# Find the optimum block size for the two-stage table
min_size = sys.maxsize
for block_size in [2 ** i for i in range(5,10)]:
size = len(records) * record_size
stage1, stage2 = compress_table(table, block_size)
size += get_tables_size(stage1, stage2)
#print "/* block size %5d => %5d bytes */" % (block_size, size)
if size < min_size:
min_size = size
min_stage1, min_stage2 = stage1, stage2
min_block_size = block_size
print("/* This module is generated by the maint/MultiStage2.py script.")
print("Do not modify it by hand. Instead modify the script and run it")
print("to regenerate this code.")
print()
print("As well as being part of the PCRE2 library, this module is #included")
print("by the pcre2test program, which redefines the PRIV macro to change")
print("table names from _pcre2_xxx to xxxx, thereby avoiding name clashes")
print("with the library. At present, just one of these tables is actually")
print("needed. */")
print()
print("#ifndef PCRE2_PCRE2TEST")
print()
print("#ifdef HAVE_CONFIG_H")
print("#include \"config.h\"")
print("#endif")
print()
print("#include \"pcre2_internal.h\"")
print()
print("#endif /* PCRE2_PCRE2TEST */")
print()
print("/* Unicode character database. */")
print("/* This file was autogenerated by the MultiStage2.py script. */")
print("/* Total size: %d bytes, block size: %d. */" % (min_size, min_block_size))
print()
print("/* The tables herein are needed only when UCP support is built,")
print("and in PCRE2 that happens automatically with UTF support.")
print("This module should not be referenced otherwise, so")
print("it should not matter whether it is compiled or not. However")
print("a comment was received about space saving - maybe the guy linked")
print("all the modules rather than using a library - so we include a")
print("condition to cut out the tables when not needed. But don't leave")
print("a totally empty module because some compilers barf at that.")
print("Instead, just supply small dummy tables. */")
print()
print("#ifndef SUPPORT_UNICODE")
print("const ucd_record PRIV(ucd_records)[] = {{0,0,0,0,0 }};")
print("const uint8_t PRIV(ucd_stage1)[] = {0};")
print("const uint16_t PRIV(ucd_stage2)[] = {0};")
print("const uint32_t PRIV(ucd_caseless_sets)[] = {0};")
print("#else")
print()
print("const char *PRIV(unicode_version) = \"{}\";".format(unicode_version))
print()
print(record_struct)
# --- Added by PH: output the table of caseless character sets ---
print("const uint32_t PRIV(ucd_caseless_sets)[] = {")
print(" NOTACHAR,")
for s in sets:
s = sorted(s)
for x in s:
print(' 0x%04x,' % x, end=' ')
print(' NOTACHAR,')
print('};')
print()
# ------
print("/* When #included in pcre2test, we don't need this large table. */")
print()
print("#ifndef PCRE2_PCRE2TEST")
print()
print_records(records, record_size)
print_table(min_stage1, 'PRIV(ucd_stage1)')
print_table(min_stage2, 'PRIV(ucd_stage2)', min_block_size)
print("#if UCD_BLOCK_SIZE != %d" % min_block_size)
print("#error Please correct UCD_BLOCK_SIZE in pcre2_internal.h")
print("#endif")
print("#endif /* SUPPORT_UNICODE */")
print()
print("#endif /* PCRE2_PCRE2TEST */")
"""
# Three-stage tables:
# Find the optimum block size for 3-stage table
min_size = sys.maxint
for stage3_block in [2 ** i for i in range(2,6)]:
stage_i, stage3 = compress_table(table, stage3_block)
for stage2_block in [2 ** i for i in range(5,10)]:
size = len(records) * 4
stage1, stage2 = compress_table(stage_i, stage2_block)
size += get_tables_size(stage1, stage2, stage3)
# print "/* %5d / %3d => %5d bytes */" % (stage2_block, stage3_block, size)
if size < min_size:
min_size = size
min_stage1, min_stage2, min_stage3 = stage1, stage2, stage3
min_stage2_block, min_stage3_block = stage2_block, stage3_block
print "/* Total size: %d bytes" % min_size */
print_records(records)
print_table(min_stage1, 'ucd_stage1')
print_table(min_stage2, 'ucd_stage2', min_stage2_block)
print_table(min_stage3, 'ucd_stage3', min_stage3_block)
"""
| gpl-3.0 | 2,237,033,308,509,130,800 | 41.894052 | 143 | 0.604325 | false |
leebird/legonlp | annotation/align.py | 1 | 2062 | # -*- coding: utf-8 -*-
import os
import sys
import re
import codecs
from alignment import Alignment,Hirschberg
from readers import AnnParser
from writers import AnnWriter
writer = AnnWriter()
def get_phrase(text):
p = re.compile(ur'[a-zA-Z]+|[0-9]+|\s+|[.,;!\(\)]+')
lista = []
pre = 0
for m in p.finditer(text):
start = m.start()
end = m.end()
if pre < start:
lista.append(text[pre:start])
lista.append(text[start:end])
pre = end
return lista
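# Walk every document under 'input', align its (possibly altered) text against
# the gold text with the same pmid under 'output' using Hirschberg alignment,
# then remap the annotated entity offsets onto the gold text before writing
# the adjusted .ann files back to 'output'.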
for root,_,files in os.walk('input'):
for f in files:
if not f.endswith('.txt'):
continue
pmid = f[:-4]
print pmid
alter = os.path.join(root,pmid+'.txt')
alterFile = codecs.open(alter,'r','utf-8')
alterText = alterFile.read().strip()
alterFile.close()
reader = AnnParser(root,pmid+'.ann')
annotation = reader.parse()
if len(annotation['T']) == 0:
writer.write('output',pmid+'.ann',annotation)
continue
gold = os.path.join('output',pmid+'.txt')
goldFile = codecs.open(gold,'r','utf-8')
goldText = goldFile.read().strip()
goldFile.close()
entities = annotation['T']
goldPhrases = get_phrase(goldText)
alterPhrases = get_phrase(alterText)
h = Hirschberg(goldPhrases,alterPhrases)
#h = Hirschberg(list(goldText),list(alterText))
alignGold,alignAlter = h.align()
#print ''.join(alignGold)
#print ''.join(alignAlter)
alter2gold = h.map_alignment(''.join(alignGold),''.join(alignAlter))
for k,e in entities.iteritems():
start = int(e.start)
end = int(e.end)
e.start = alter2gold[start]
if alter2gold[end] - alter2gold[end-1] > 1:
e.end = alter2gold[end-1]+1
else:
e.end = alter2gold[end]
e.text = goldText[e.start:e.end]
writer.write('output',pmid+'.ann',annotation)
| gpl-2.0 | -4,057,926,957,120,265,700 | 27.638889 | 76 | 0.548497 | false |
khalim19/gimp-plugin-export-layers | export_layers/tests/test_placeholders.py | 1 | 2548 | # -*- coding: utf-8 -*-
#
# This file is part of Export Layers.
#
# Copyright (C) 2013-2019 khalim19 <[email protected]>
#
# Export Layers is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Export Layers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Export Layers. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import unittest
import parameterized
from export_layers import pygimplib as pg
from export_layers.pygimplib.tests import stubs_gimp
from .. import placeholders
class TestGetReplacedArgsAndKwargs(unittest.TestCase):
def test_get_replaced_args_and_kwargs(self):
image = stubs_gimp.ImageStub()
layer = stubs_gimp.LayerStub()
layer_exporter = object()
args = ["current_image", "current_layer", "some_other_arg"]
kwargs = {
"run_mode": 0, "image": "current_image", "layer": "current_layer"}
new_args, new_kwargs = placeholders.get_replaced_args_and_kwargs(
args, kwargs, image, layer, layer_exporter)
self.assertListEqual(new_args, [image, layer, "some_other_arg"])
self.assertDictEqual(new_kwargs, {"run_mode": 0, "image": image, "layer": layer})
class TestPlaceHolderSetting(unittest.TestCase):
@parameterized.parameterized.expand([
("placeholder", placeholders.PlaceholderSetting, []),
("image_placeholder", placeholders.PlaceholderImageSetting, ["current_image"]),
])
def test_get_allowed_placeholder_names(
self, test_case_name_suffix, placeholder_setting_type, expected_result):
self.assertListEqual(
placeholder_setting_type.get_allowed_placeholder_names(), expected_result)
@parameterized.parameterized.expand([
("placeholder", placeholders.PlaceholderSetting, 0),
("image_placeholder", placeholders.PlaceholderImageSetting, 1),
])
def test_get_allowed_placeholders(
self, test_case_name_suffix, placeholder_setting_type, expected_length):
self.assertEqual(len(placeholder_setting_type.get_allowed_placeholders()), expected_length)
| gpl-3.0 | -1,541,752,391,555,362,800 | 35.927536 | 95 | 0.730377 | false |
dmych/cn | utils.py | 1 | 3805 | # This file is part of Coffee Notes project
#
# Coffee Notes is a crossplatform note-taking application
# inspired by Notational Velocity.
# <https://github.com/dmych/cn>
#
# Copyright (c) Dmitri Brechalov, 2011
#
# Coffee Notes is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Coffee Notes is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Coffee Notes. If not, see <http://www.gnu.org/licenses/>.
LOGNAME = '/tmp/cn.log'
def log(msg, restart=False):
if restart:
f = open(LOGNAME, 'w')
else:
f = open(LOGNAME, 'a')
f.write('%s\n' % msg)
f.close()
def getProgramPath():
import sys
import os.path
pname = sys.argv[0]
if os.path.islink(pname):
pname = os.readlink(pname)
return os.path.abspath(os.path.dirname(pname))
def openConfig(fname, mode):
'''Return config file object'''
import os.path
return open(os.path.expanduser(fname), mode)
class SimpleConfig:
def __init__(self, fileName):
self.data = {}
self.fileName = fileName
self.__readData()
def __readData(self):
self.data = {}
try:
f = openConfig(self.fileName, 'r')
except:
log('CANNOT FIND %s' % self.fileName)
return
for line in f.readlines():
line = line.strip()
if not line or line.startswith('#'):
continue # just empty line or comment
try:
(key, value) = line.split('=', 1)
self.data[key] = value
except:
pass
f.close()
log('CONFIG')
log(repr(self.data))
def save(self):
f = openConfig(self.fileName, 'w')
for (k, v) in self.data.items():
f.write('%s=%s\n' % (k, v))
f.close()
def close(self):
self.save()
def clear(self):
self.data.clear()
def readStr(self, key, default=None):
try:
value = self.data[key]
except:
value = default
return value
def readInt(self, key, default=None):
try:
return int(self.readStr(key))
except:
return default
def readBool(self, key, default=False):
try:
return bool(self.readInt(key))
except:
return default
def keys(self, start=None):
if start:
result = [ item for item in self.data.keys() \
if item.startswith(start) ]
else:
            result = self.data.keys()
return result
def values(self, start=None):
keys = self.keys(start)
result = [ self.data[k] for k in keys ]
return result
def writeStr(self, key, value):
self.data[key] = str(value)
writeInt = writeStr
def writeBool(self, key, value):
self.writeStr(key, int(value))
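# Example usage of SimpleConfig (illustrative only; the file name is made up):
#   cfg = SimpleConfig('~/.coffeenotesrc')
#   cfg.writeStr('notes_dir', '~/notes')
#   cfg.writeBool('autosave', True)
#   print cfg.readStr('notes_dir', '~/notes'), cfg.readBool('autosave')
#   cfg.save()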
def strip_hashes(txt):
    '''Strip all hashes and spaces at the beginning and the end of line
'''
while txt and txt[0] in '# \t':
txt = txt[1:]
while txt and txt[-1] in '# \t':
txt = txt[:-1]
return txt
def sanitize(txt):
'''Replace all "dangerous" characters (such as <>|\/")
Also strip hashes and spaces at the beginning or end of the line
'''
txt = strip_hashes(txt)
for c in ' \t<>/\|"\'?*:;~':
txt = txt.replace(c, '-')
return txt
# end of utils.py
| gpl-3.0 | -7,858,388,737,307,353,000 | 26.178571 | 71 | 0.578975 | false |
cloudburst/libheap | libheap/pydbg/pygdbpython.py | 1 | 4894 | import sys
from functools import wraps
from libheap.frontend.printutils import print_error
try:
import gdb
except ImportError:
print("Not running inside of GDB, exiting...")
sys.exit()
def gdb_is_running(f):
"decorator to make sure gdb is running"
@wraps(f)
def _gdb_is_running(*args, **kwargs):
if (gdb.selected_thread() is not None):
return f(*args, **kwargs)
else:
print_error("GDB is not running.")
return _gdb_is_running
class pygdbpython:
def __init__(self):
self.inferior = None
@gdb_is_running
def execute(self, cmd, to_string=True):
return gdb.execute(cmd, to_string=to_string)
def format_address(self, value):
"""Helper for printing gdb.Value on both python 2 and 3
"""
try:
ret = int(value)
except gdb.error:
# python2 error: Cannot convert value to int.
# value.cast(gdb.lookup_type("unsigned long"))
ret = int(str(value).split(' ')[0], 16)
return ret
@gdb_is_running
def get_heap_address(self, mp=None):
"""Read heap address from glibc's mp_ structure if available,
otherwise fall back to /proc/self/maps which is unreliable.
"""
start, end = None, None
if mp is not None:
from libheap.ptmalloc.malloc_par import malloc_par
if isinstance(mp, malloc_par):
start = mp.sbrk_base
else:
print_error("Please specify a valid malloc_par variable")
# XXX: add end from arena(s).system_mem ?
else:
pid, task_id, thread_id = gdb.selected_thread().ptid
maps_file = "/proc/%d/task/%d/maps"
maps_data = open(maps_file % (pid, task_id)).readlines()
for line in maps_data:
if any(x.strip() == '[heap]' for x in line.split(' ')):
heap_range = line.split(' ')[0]
start, end = [int(h, 16) for h in heap_range.split('-')]
break
return start, end
@gdb_is_running
def get_arch(self):
cmd = self.execute("maintenance info sections ?")
return cmd.strip().split()[-1:]
def get_inferior(self):
try:
if self.inferior is None:
if len(gdb.inferiors()) == 0:
print_error("No gdb inferior could be found.")
return -1
else:
self.inferior = gdb.inferiors()[0]
return self.inferior
else:
return self.inferior
except AttributeError:
print_error("This gdb's python support is too old.")
sys.exit()
@gdb_is_running
def get_size_sz(self):
try:
_machine = self.get_arch()[0]
except IndexError:
_machine = ""
SIZE_SZ = 0
print_error("Retrieving SIZE_SZ failed.")
except TypeError: # gdb is not running
_machine = ""
SIZE_SZ = 0
print_error("Retrieving SIZE_SZ failed.")
if "elf64" in _machine:
SIZE_SZ = 8
elif "elf32" in _machine:
SIZE_SZ = 4
else:
SIZE_SZ = 0
print_error("Retrieving SIZE_SZ failed.")
return SIZE_SZ
@gdb_is_running
def read_memory(self, address, length):
if self.inferior is None:
self.inferior = self.get_inferior()
return self.inferior.read_memory(address, length)
@gdb_is_running
def read_variable(self, variable=None):
if variable is None:
print_error("Please specify a variable to read")
return None
try:
return gdb.selected_frame().read_var(variable)
except RuntimeError:
# No idea why this works but sometimes the frame is not selected
# print_error("No gdb frame is currently selected.\n")
try:
return gdb.selected_frame().read_var(variable)
except RuntimeError:
# variable was not found
# print_error("wrong here!")
return None
except ValueError:
# variable was not found
return None
@gdb_is_running
def string_to_argv(self, arg=None):
if arg is not None:
return gdb.string_to_argv(arg)
@gdb_is_running
def write_memory(self, address, buf, length=None):
if self.inferior is None:
self.inferior = self.get_inferior()
try:
if length is None:
self.inferior.write_memory(address, buf)
else:
self.inferior.write_memory(address, buf, length)
except MemoryError:
print_error("GDB inferior write_memory error")
| mit | -2,558,321,440,610,970,000 | 29.397516 | 76 | 0.53964 | false |
Panos512/inspire-next | inspirehep/modules/records/receivers.py | 1 | 12132 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015, 2016 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Pre-record receivers."""
from flask import current_app
from invenio_indexer.signals import before_record_index
from invenio_records.signals import (
before_record_insert,
before_record_update,
)
from inspirehep.utils.date import create_valid_date
from inspirehep.dojson.utils import get_recid_from_ref, classify_field
import six
from .signals import after_record_enhanced
@before_record_index.connect
def enhance_record(sender, json, *args, **kwargs):
"""Runs all the record enhancers and fires the after_record_enhanced signals
to allow receivers work with a fully populated record."""
populate_inspire_subjects(sender, json, *args, **kwargs)
populate_inspire_document_type(sender, json, *args, **kwargs)
match_valid_experiments(sender, json, *args, **kwargs)
dates_validator(sender, json, *args, **kwargs)
add_recids_and_validate(sender, json, *args, **kwargs)
after_record_enhanced.send(json)
@before_record_insert.connect
@before_record_update.connect
def normalize_field_categories(sender, *args, **kwargs):
"""Normalize field_categories."""
for idx, field in enumerate(sender.get('field_categories', [])):
if field.get('scheme') == "INSPIRE" or '_scheme' in field or '_term' in field:
# Already normalized form
continue
original_term = field.get('term')
normalized_term = classify_field(original_term)
scheme = 'INSPIRE' if normalized_term else None
original_scheme = field.get('scheme')
if isinstance(original_scheme, (list, tuple)):
original_scheme = original_scheme[0]
updated_field = {
'_scheme': original_scheme,
'scheme': scheme,
'_term': original_term,
'term': normalized_term,
}
source = field.get('source')
if source:
if 'automatically' in source:
source = 'INSPIRE'
updated_field['source'] = source
sender['field_categories'][idx].update(updated_field)
def populate_inspire_subjects(recid, json, *args, **kwargs):
"""
Populate a json record before indexing it to elastic.
Adds a field for faceting INSPIRE subjects
"""
inspire_subjects = [
s['term'] for s in json.get('field_categories', [])
if s.get('scheme', '') == 'INSPIRE' and s.get('term')
]
json['facet_inspire_subjects'] = inspire_subjects
def populate_inspire_document_type(recid, json, *args, **kwargs):
""" Populates a json record before indexing it to elastic.
Adds a field for faceting INSPIRE document type
"""
inspire_doc_type = []
if 'collections' in json:
for element in json.get('collections', []):
if 'primary' in element and element.get('primary', ''):
if element['primary'].lower() == 'published':
inspire_doc_type.append('peer reviewed')
break
elif element['primary'].lower() == 'thesis':
inspire_doc_type.append(element['primary'].lower())
break
elif element['primary'].lower() == 'book':
inspire_doc_type.append(element['primary'].lower())
break
elif element['primary'].lower() == 'bookchapter':
inspire_doc_type.append('book chapter')
break
elif element['primary'].lower() == 'proceedings':
inspire_doc_type.append(element['primary'].lower())
break
elif element['primary'].lower() == 'conferencepaper':
inspire_doc_type.append('conference paper')
break
elif element['primary'].lower() == 'note':
inspire_doc_type.append('note')
break
elif element['primary'].lower() == 'report':
inspire_doc_type.append(element['primary'].lower())
break
elif element['primary'].lower() == 'activityreport':
inspire_doc_type.append('activity report')
break
complete_pub_info = []
if not inspire_doc_type:
for field in json.get('publication_info', []):
for k, v in field.iteritems():
complete_pub_info.append(k)
if ('page_start' not in complete_pub_info and
                'artid' not in complete_pub_info):
inspire_doc_type.append('preprint')
inspire_doc_type.extend([s['primary'].lower() for s in
json.get('collections', []) if 'primary'
in s and s['primary'] is not None and
s['primary'].lower() in
('review', 'lectures')])
json['facet_inspire_doc_type'] = inspire_doc_type
def match_valid_experiments(recid, json, *args, **kwargs):
"""Matches misspelled experiment names with valid experiments.
Tries to match with valid experiments by matching lowercased and
whitespace-free versions of known experiments.
"""
experiments = json.get("accelerator_experiments")
if experiments:
for exp in experiments:
# FIXME: These lists are temporary. We should have a list of experiment names
# that is generated from the current state of our data.
from .experiment_list import EXPERIMENTS_NAMES as experiments_list_original, experiments_list
facet_experiments_list = []
experiments = exp.get("experiment")
if experiments:
if type(experiments) is not list:
experiments = [experiments]
for experiment in experiments:
experiment = experiment.lower()
experiment = experiment.replace(' ', '')
try:
# Check if normalized form of experiment is in the list of
# valid experiments
x = experiments_list.index(experiment)
facet_experiment = experiments_list_original[x]
except ValueError:
# If the experiment cannot be matched it is considered
# valid
facet_experiment = exp.get("experiment")
facet_experiments_list.append(facet_experiment)
exp.update({"facet_experiment": [facet_experiments_list]})
def dates_validator(recid, json, *args, **kwargs):
"""Find and assign the correct dates in a record."""
dates_to_check = ['opening_date', 'closing_date', 'deadline_date']
for date_key in dates_to_check:
if date_key in json:
valid_date = create_valid_date(json[date_key])
if valid_date != json[date_key]:
current_app.logger.warning(
                    'MALFORMED: {0} value in {1}: {2}'.format(
date_key, recid, json[date_key]
)
)
json[date_key] = valid_date
def references_validator(recid, json, *args, **kwargs):
"""Find and assign the correct references in a record."""
for ref in json.get('references', []):
if ref.get('recid') and not six.text_type(ref.get('recid')).isdigit():
# Bad recid! Remove.
current_app.logger.warning(
'MALFORMED: recid value found in references of {0}: {1}'.format(recid, ref.get('recid')))
del ref['recid']
def populate_recid_from_ref(recid, json, *args, **kwargs):
"""Extracts recids from all reference fields and adds them to ES.
For every field that has as a value a reference object to another record,
add a sibling after extracting the record id. e.g.
{"record": {"$ref": "http://x/y/2}}
is transformed to:
{"record": {"$ref": "http://x/y/2},
"recid": 2}
Siblings are renamed using the following scheme:
Remove "record" occurrences and append _recid without doubling or
prepending underscores to the original name.
For every known list of object references add a new list with the
corresponding recids. e.g.
{"records": [{"$ref": "http://x/y/1"}, {"$ref": "http://x/y/2"}]}
is transformed to:
{"records": [{"$ref": "http://x/y/1"}, {"$ref": "http://x/y/2"}]
"recids": [1, 2]}
"""
list_ref_fields_translations = {
'deleted_records': 'deleted_recids'
}
    def _recursive_find_refs(json_root):
if isinstance(json_root, list):
items = enumerate(json_root)
elif isinstance(json_root, dict):
# Note that items have to be generated before altering the dict.
# In this case, iteritems might break during iteration.
items = json_root.items()
else:
items = []
for key, value in items:
if (isinstance(json_root, dict) and isinstance(value, dict) and
'$ref' in value):
# Append '_recid' and remove 'record' from the key name.
key_basename = key.replace('record', '').rstrip('_')
new_key = '{}_recid'.format(key_basename).lstrip('_')
json_root[new_key] = get_recid_from_ref(value)
elif (isinstance(json_root, dict) and isinstance(value, list) and
key in list_ref_fields_translations):
new_list = [get_recid_from_ref(v) for v in value]
new_key = list_ref_fields_translations[key]
json_root[new_key] = new_list
else:
                _recursive_find_refs(value)
    _recursive_find_refs(json)
def add_recids_and_validate(recid, json, *args, **kwargs):
"""Ensure that recids are generated before being validated."""
populate_recid_from_ref(recid, json, *args, **kwargs)
references_validator(recid, json, *args, **kwargs)
 | gpl-2.0 | -3,472,888,303,116,580,000 | 40.406143 | 105 | 0.585889 | false |
so-sure/tagged-route53 | tagged-route53.py | 1 | 10149 | #!/usr/bin/python
import requests
import boto3
import argparse
class Dns(object):
# Default constructor of the class.
def __init__(self):
self.ec2_client = boto3.client('ec2')
self.dns_client = boto3.client('route53')
self.role = None
self.env = None
self.instance_id = None
self.instances = None
self.indexes = None
self.instance_count = None
self.hostname = None
self.ip = None
self.use_public_ip = None
self.domain = None
self.set_tag_name = True
self.set_dns_registration = True
self.force_dns_registration = False
self.tag_env = None
self.tag_role = None
self.tag_index = None
self.name = None
self.update_dns = True
self.quiet = False
self.update_index = True
def current_instance(self):
response = requests.get('http://169.254.169.254/latest/meta-data/instance-id')
self.instance_id = response.text
if not self.quiet:
print 'Instance: %s' % (self.instance_id)
def current_public_ip(self):
response = self.ec2_client.describe_instances(InstanceIds=[self.instance_id])
instances = response['Reservations']
self.ip = instances[0]['Instances'][0]['PublicIpAddress']
if not self.quiet:
print 'IP: %s' % (self.ip)
def current_private_ip(self):
response = self.ec2_client.describe_instances(InstanceIds=[self.instance_id])
instances = response['Reservations']
self.ip = instances[0]['Instances'][0]['PrivateIpAddress']
if not self.quiet:
print 'IP: %s' % (self.ip)
def current_role_env(self):
if self.instance_id is None:
self.current_instance()
response = self.ec2_client.describe_instances(InstanceIds=[self.instance_id])
instances = response['Reservations']
# Only 1 instance
tags = instances[0]['Instances'][0]['Tags']
for tag in tags:
if self.env is None and tag['Key'] == self.tag_env:
self.env = tag['Value']
elif self.role is None and tag['Key'] == self.tag_role:
self.role = tag['Value']
if not self.quiet:
print 'Env: %s Role: %s' % (self.env, self.role)
def get_instance_ids(self):
if self.env is None or self.role is None:
self.current_role_env()
filters = [
{ 'Name':'tag:%s' % (self.tag_env), 'Values':[self.env]},
{ 'Name':'tag:%s' % (self.tag_role), 'Values':[self.role]}
]
response = self.ec2_client.describe_instances(Filters=filters)
instances = response['Reservations']
if not self.quiet:
print 'Checking tags'
self.instances = {}
self.indexes = []
for instance in instances:
index = -1
if instance['Instances'][0]['State']['Name'] == 'running':
instance_id = instance['Instances'][0]['InstanceId']
tags = instance['Instances'][0]['Tags']
for tag in tags:
if tag['Key'] == self.tag_index:
index = tag['Value']
self.indexes.append(index)
self.instances[instance_id] = int(index)
def get_instance_count(self):
if self.instances is None:
self.get_instance_ids()
# the current instance will be in the list, but as we want to start at 1, that's good
self.instance_count = len(self.instances)
if not self.quiet:
print 'Instance count: %d' % (self.instance_count)
if self.instances.has_key(self.instance_id) and self.instances[self.instance_id] >= 0:
self.instance_count = self.instances[self.instance_id]
if not self.quiet:
print 'Index is already set %s' % (self.instance_count)
self.update_dns = False
self.update_index = False
if self.instance_count < 1:
raise Exception('Instance count must be 1 or more')
if not self.quiet:
print self.indexes
if self.update_index:
# May be replacing a previous server
for i in range(1, self.instance_count + 2):
if str(i) not in self.indexes:
self.instance_count = i
break
if not self.quiet:
print 'Using index: %d' % (self.instance_count)
if self.update_index:
self.ec2_client.create_tags(
Resources=[self.instance_id],
Tags=[{'Key': self.tag_index, 'Value': str(self.instance_count) }]
)
if self.set_tag_name:
name = '%s-%s-%d' % (self.env, self.role, self.instance_count)
if not self.quiet:
print 'Setting instance name: %s' % (name)
self.ec2_client.create_tags(
Resources=[self.instance_id],
Tags=[{'Key': 'Name', 'Value': name }]
)
def get_hostname(self):
if self.instance_count is None:
self.get_instance_count()
if self.name is None:
self.hostname = '%s-%d.%s.%s' % (self.role, self.instance_count, self.env, self.domain)
else:
self.hostname = "%s.%s" % (self.name, self.domain)
if not self.quiet:
print 'Hostname: %s' % (self.hostname)
else:
print self.hostname
def run_update_all(self):
self.get_instance_ids()
if not self.quiet:
print self.instances
for instance_id in self.instances.keys():
if not self.quiet:
print 'Updating instance %s' % (instance_id)
self.instance_id = instance_id
self.run_update_dns()
self.indexes.append(str(self.instance_count))
self.hostname = None
self.ip = None
self.instance_count = None
self.update_dns = True
def run_update_dns(self):
if self.hostname is None:
self.get_hostname()
if not self.update_dns and not self.force_dns_registration:
if not self.quiet:
print 'Skipping dns update as server already exists'
return
if not self.set_dns_registration:
if not self.quiet:
print 'Skipping dns registration as per request'
return
if self.ip is None:
if self.use_public_ip:
self.current_public_ip()
else:
self.current_private_ip()
response = self.dns_client.list_hosted_zones_by_name(
DNSName=self.domain
)
zone_id = response['HostedZones'][0]['Id'].replace('/hostedzone/', '')
response = self.dns_client.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch={
'Changes': [
{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': self.hostname,
'Type': 'A',
'TTL': 60,
'ResourceRecords': [
{
'Value': self.ip
},
]
}
},
]
}
)
if not self.quiet:
print response
def main(self):
parser = argparse.ArgumentParser(description='Update route 53 dns based on server tags')
parser.add_argument('domain', help='Domain name')
parser.add_argument('--skip-tag-name', action='store_true', default=False, help='Skip setting the tag name')
parser.add_argument('--skip-dns-registration', action='store_true', default=False, help='If set, only display the dns entry and do run any dns updates')
parser.add_argument('--force-dns-registration', action='store_true', default=False, help='If set, only display the dns entry and do run any dns updates')
parser.add_argument('--quiet', action='store_true', default=False, help='If set, only output the hostname')
parser.add_argument('--tag-role', default='role', help='Role tag name (default: %(default)s)')
parser.add_argument('--tag-env', default='env', help='Environment tag name (default: %(default)s)')
parser.add_argument('--tag-index', default='index', help='Index tag name (default: %(default)s)')
parser.add_argument('--public-ip', action='store_true', default=False, help='Use public ip instead of private ip')
parser.add_argument('--name', default=None, help='Ignore tags and just set name')
parser.add_argument('--role', default=None, help='Ignore tags and use given role')
parser.add_argument('--env', default=None, help='Ignore tags and use given env')
parser.add_argument('--instance-id', default=None, help='If given, use instance id given rather than local instance')
parser.add_argument('--all-tags', action='store_true', default=False, help='If given, run for all instances that match tags for role/env. Can be used with --role and/or --env.')
args = parser.parse_args()
self.domain = args.domain
self.set_tag_name = not args.skip_tag_name
self.set_dns_registration = not args.skip_dns_registration
self.force_dns_registration = args.force_dns_registration
self.quiet = args.quiet
self.tag_env = args.tag_env
self.tag_role = args.tag_role
self.role = args.role
self.env = args.env
self.tag_index = args.tag_index
self.name = args.name
self.use_public_ip = args.public_ip
self.instance_id = args.instance_id
if args.all_tags:
self.run_update_all()
else:
self.run_update_dns()
if __name__ == '__main__':
launcher = Dns()
launcher.main()
| apache-2.0 | -2,124,829,672,126,975,700 | 38.034615 | 185 | 0.549611 | false |
OpenBfS/dokpool-plone | Plone/src/docpool.config/docpool/config/local/elan_de.py | 1 | 8161 | # -*- coding: utf-8 -*-
from ..utils import set_local_roles
from datetime import datetime
from docpool.base.content.documentpool import APPLICATIONS_KEY
from docpool.config import _
from docpool.config.general.elan import connectTypesAndCategories
from docpool.config.local.base import CONTENT_AREA
from docpool.config.utils import CHILDREN
from docpool.config.utils import createPloneObjects
from docpool.config.utils import ID
from docpool.config.utils import TITLE
from docpool.config.utils import TYPE
from docpool.elan.config import ELAN_APP
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import log_exc
from zExceptions import BadRequest
from zope.annotation.interfaces import IAnnotations
import transaction
# ELAN specific structures
def dpAdded(self):
"""
"""
annotations = IAnnotations(self)
fresh = ELAN_APP not in annotations[APPLICATIONS_KEY]
if fresh:
annotations[APPLICATIONS_KEY].append(ELAN_APP)
copyCurrentSituation(self, fresh)
transaction.commit()
createBasicPortalStructure(self, fresh)
transaction.commit()
createContentConfig(self, fresh)
transaction.commit()
if fresh:
self.esd.correctAllDocTypes()
transaction.commit()
connectTypesAndCategories(self)
placeful_wf = getToolByName(self, 'portal_placeful_workflow')
try:
self.archive.manage_addProduct[
'CMFPlacefulWorkflow'
].manage_addWorkflowPolicyConfig()
except BadRequest as e:
# print type(e)
log_exc(e)
config = placeful_wf.getWorkflowPolicyConfig(self.archive)
placefulWfName = 'elan-archive'
config.setPolicyIn(policy=placefulWfName, update_security=False)
config.setPolicyBelow(policy=placefulWfName, update_security=False)
createELANUsers(self)
createELANGroups(self)
setELANLocalRoles(self)
self.reindexAll()
BASICSTRUCTURE = [
{TYPE: 'ELANArchives', TITLE: u'Archive', ID: 'archive', CHILDREN: [], 'local_behaviors': ['elan']}
]
ARCHIVESTRUCTURE = [
{
TYPE: 'ELANCurrentSituation',
TITLE: 'Elektronische Lagedarstellung',
ID: 'esd',
CHILDREN: [],
},
CONTENT_AREA,
]
SPECIAL_PAGES = [
{TYPE: 'Text', TITLE: u'Hilfe', ID: 'help', CHILDREN: []},
]
ADMINSTRUCTURE = [
{
TYPE: 'ELANContentConfig',
TITLE: 'Konfiguration Inhalte',
ID: 'contentconfig',
CHILDREN: [
{
TYPE: 'DPEvents',
TITLE: u'Ereignisse',
ID: 'scen',
CHILDREN: [
{
TYPE: 'DPEvent',
TITLE: u'Normalfall',
ID: 'routinemode',
"Status": "active",
"TimeOfEvent": datetime.now(),
CHILDREN: [],
}
],
},
{TYPE: 'Text', TITLE: u'Ticker', ID: 'ticker', CHILDREN: []},
{TYPE: 'Text', TITLE: u'Hilfe', ID: 'help', CHILDREN: []},
{
TYPE: 'DashboardsConfig',
TITLE: u'Dokumentsammlungen Pinnwand',
ID: 'dbconfig',
CHILDREN: [],
},
],
}
] + SPECIAL_PAGES
def createBasicPortalStructure(plonesite, fresh):
"""
"""
createPloneObjects(plonesite, BASICSTRUCTURE, fresh)
def createContentConfig(plonesite, fresh):
"""
"""
createPloneObjects(plonesite, ADMINSTRUCTURE, fresh)
def createELANUsers(self):
# Set type for user folders
mtool = getToolByName(self, "portal_membership")
prefix = self.prefix or self.getId()
prefix = str(prefix)
title = self.Title()
mtool.addMember(
'%s_elanadmin' % prefix, 'ELAN Administrator (%s)' % title, [
'Member'], []
)
elanadmin = mtool.getMemberById('%s_elanadmin' % prefix)
elanadmin.setMemberProperties(
{"fullname": 'ELAN Administrator (%s)' % title, "dp": self.UID()}
)
elanadmin.setSecurityProfile(password="admin")
mtool.addMember(
'%s_contentadmin' % prefix, 'Content Admin (%s)' % title, [
'Member'], []
)
contentadmin = mtool.getMemberById('%s_contentadmin' % prefix)
contentadmin.setMemberProperties(
{"fullname": 'Content Admin (%s)' % title, "dp": self.UID()}
)
contentadmin.setSecurityProfile(password="admin")
def setELANLocalRoles(self):
"""
Normal local members: Reader
Administrators: Site Administrator
ContentAdministrators: Reviewer
Receivers: Owner, Editor
Senders: Contributor
"""
contentadmin = "{0}_ContentAdministrators"
set_local_roles(self, self, "{0}_SituationReportAdmins", ["SituationReportAdmin"])
set_local_roles(self, self.contentconfig, contentadmin, ["ContentAdmin"])
for pagedef in SPECIAL_PAGES:
name = pagedef[ID]
set_local_roles(self, self[name], contentadmin, ["ContentAdmin"])
set_local_roles(self, self.archive, contentadmin, ["DocPoolAdmin"])
set_local_roles(self, self.content.Groups, contentadmin, ["Site Administrator"])
set_local_roles(self, self.esd, contentadmin, ["ContentAdmin"])
set_local_roles(self, self, "{0}_ELANUsers", ["ELANUser"])
set_local_roles(self, self.config, contentadmin, ["Owner"])
def createELANGroups(self):
# We need local groups for
# - General access to the ESD
# - Administration
# - Content Administration
# - Receiving content from others
# - Sending content to others
prefix = self.prefix or self.getId()
prefix = str(prefix)
title = self.Title()
gtool = getToolByName(self, 'portal_groups')
gtool.addPrincipalToGroup('%s_elanadmin' % prefix, '%s_Members' % prefix)
gtool.addPrincipalToGroup(
'%s_contentadmin' %
prefix,
'%s_Members' %
prefix)
gtool.addPrincipalToGroup(
'%s_elanadmin' %
prefix,
'%s_Administrators' %
prefix)
# Content administrator group
props = {
'allowedDocTypes': [],
'title': 'Content Administrators (%s)' % title,
'description': 'Responsible for the definition of scenarios, ticker texts and additional content.',
'dp': self.UID(),
}
gtool.addGroup("%s_ContentAdministrators" % prefix, properties=props)
gtool.addPrincipalToGroup(
'%s_contentadmin' % prefix, '%s_ContentAdministrators' % prefix
)
# Group for ELAN application rights
props = {
'allowedDocTypes': [],
'title': 'ELAN Users (%s)' % title,
'description': 'Users with access to ELAN functions.',
'dp': self.UID(),
}
gtool.addGroup("%s_ELANUsers" % prefix, properties=props)
gtool.addPrincipalToGroup('%s_elanadmin' % prefix, '%s_ELANUsers' % prefix)
gtool.addPrincipalToGroup(
'%s_contentadmin' %
prefix,
'%s_ELANUsers' %
prefix)
gtool.addPrincipalToGroup('%s_dpadmin' % prefix, '%s_ELANUsers' % prefix)
# Group for Situation Report users
props = {
'allowedDocTypes': [],
'title': 'Situation Report Admins/Lagebild (%s)' % title,
'description': 'Users who can manage situation reports.',
'dp': self.UID(),
}
gtool.addGroup("%s_SituationReportAdmins" % prefix, properties=props)
gtool.addPrincipalToGroup(
'%s_contentadmin' % prefix, '%s_SituationReportAdmins' % prefix)
gtool.addPrincipalToGroup(
'%s_dpadmin' % prefix, '%s_SituationReportAdmins' % prefix)
gtool.addPrincipalToGroup(
'%s_elanadmin' % prefix, '%s_SituationReportAdmins' % prefix)
def copyCurrentSituation(self, fresh):
"""
"""
if not fresh:
return
esd = self.esd
from docpool.base.utils import _copyPaste
_copyPaste(esd, self, safe=False)
self.esd.setTitle(_("Aktuelle Lage"))
self.esd.reindexObject()
# make sure the current situation is first
self.moveObject("esd", 0)
def dpRemoved(self):
"""
@param self:
@return:
"""
return
| gpl-3.0 | 5,981,266,725,301,240,000 | 30.631783 | 107 | 0.62039 | false |
diegojromerolopez/djanban | src/djanban/apps/password_reseter/email_sender.py | 1 | 1645 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from crequest.middleware import CrequestMiddleware
from django.core.mail import send_mail
from django.template.loader import get_template
from django.conf import settings
from django.urls import reverse
def send_password_request_link(password_request, user):
current_request = CrequestMiddleware.get_request()
absolute_reset_password_url = current_request.build_absolute_uri(
reverse('password_reseter:reset_password', args=(password_request.uuid,))
)
replacements = {"user": user, "absolute_reset_password_url": absolute_reset_password_url}
txt_message = get_template('password_reseter/emails/request_password_reset.txt').render(replacements)
html_message = get_template('password_reseter/emails/request_password_reset.html').render(replacements)
subject = "Djanban :: Request password reset"
return send_mail(subject, txt_message, settings.EMAIL_HOST_USER, recipient_list=[user.email],
fail_silently=False, html_message=html_message)
# The password has been reset successfully
def send_password_reset_successfully_email(user):
replacements = {"user": user}
txt_message = get_template('password_reseter/emails/password_reset_successfully.txt').render(replacements)
html_message = get_template('password_reseter/emails/password_reset_successfully.html').render(replacements)
subject = "Djanban :: Password reset successfully"
return send_mail(subject, txt_message, settings.EMAIL_HOST_USER, recipient_list=[user.email],
fail_silently=False, html_message=html_message)
| mit | -3,453,166,151,156,797,000 | 43.459459 | 112 | 0.743465 | false |
seanxwzhang/LeetCode | 148 Sort List/solution.py | 1 | 1263 | #! /usr/bin/env python
# Sort a linked list in O(n log n) time using constant space complexity.
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# a merge sort implementation
class Solution(object):
def sortList(self, head):
if not head or not head.next:
return head
fast, slow = head.next, head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
# now slow reaches the middle of the list
mid = slow.next
slow.next = None
sorted_head = self.sortList(head)
sorted_mid = self.sortList(mid)
# now two sub lists are sorted, sort them in O(n)
dummyNode = ListNode(0)
track = dummyNode
while sorted_head and sorted_mid:
if sorted_head.val < sorted_mid.val:
track.next = sorted_head
sorted_head = sorted_head.next
else:
track.next = sorted_mid
sorted_mid = sorted_mid.next
track = track.next
if sorted_head: track.next = sorted_head
if sorted_mid: track.next = sorted_mid
return dummyNode.next
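# --- Hedged usage sketch (assumes the ListNode class commented above is defined) ---
# nodes = [ListNode(v) for v in (4, 2, 1, 3)]
# for a, b in zip(nodes, nodes[1:]):
#     a.next = b
# head = Solution().sortList(nodes[0])
# while head:          # prints 1 2 3 4, one value per line
#     print(head.val)
#     head = head.next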
| mit | 6,358,962,998,563,687,000 | 33.162162 | 72 | 0.568488 | false |
crazcalm/chat-server | server.py | 1 | 11418 | import help_text
import asyncio
import argparse
import logging
from random import randint
clients = []
class SimpleChatClientProtocol(asyncio.Protocol):
"""
This class is the heart of the Chat Server. For each client that
connects to the server, an instance of this class is created. These
instances are saved in a global list.
"""
def __init__(self, name):
self.chatroom_name = name
def _send_msg(self, client, msg, format=True):
"""
        This method sends a message from this client to another
        client in the chatroom.
Args:
client (SimpleChatClientProtocol): A chat server client
msg (str): message to be sent
"""
if format:
client.transport.write("{}: {}\n".format(self.name,
msg).encode())
else:
client.transport.write("{}\n".format(msg).encode())
def _send_to_self(self, msg, client=False):
"""
This method sends messages to self. Typically used for
help dialogs and other interactions that are meant only
for this client.
Args:
msg (str): message to be sent
"""
if client:
self.transport.write("CLIENT**: {}".format(msg).encode())
else:
self.transport.write("{}\n".format(msg).encode())
def _unique_name(self, name):
"""
This method checks to see if the name that was passed
in as a parameter is unique among the names of the
clients in the chatroom.
Args:
name (str): a potential name
Return:
            bool: True if the name is unique, False otherwise
"""
logging.debug("Is the name {} unique?".format(name))
result = True
for client in clients:
logging.debug("Checking against: {}".format(client.name))
if name == client.name and self != client:
result = False
break
logging.debug("unique: {}".format(result))
return result
def connection_made(self, transport):
"""
This method designates what will happen when a client
makes a connection to the server.
Args:
transport (socket): The incoming socket from the client
"""
self.transport = transport
self.peername = transport.get_extra_info("peername")
self.name = "No Name"
while not self._unique_name(self.name):
self.name += str(randint(0, 9))
self.description = "None"
logging.info("connection_made: {}".format(self.peername).encode())
clients.append(self)
self._send_to_self("Welcome to {}!".format(self.chatroom_name))
self._send_to_self("To see the options available to you type `/help`")
self._send_to_self("Your username name is: {}".format(self.name))
self.send_to_everyone("<--- {} joined the room".format(self.name),
format=False)
def send_to_everyone(self, msg, format=True):
"""
This method sends a message to everyone in the chatroom.
Args:
msg (str): The message to be sent
"""
for client in clients:
self._send_msg(client, msg, format=format)
def find_client_by_name(self, name):
"""
This method attempts to find a client that has a
name that matches the name passed into the method.
If the client is found, a reference to that client
is returned. If the client is not found, then a None
object is returned.
Args:
name (str): The name used in the search
Returns:
            client or None: the matching client, or None if not found
"""
found = None
for client in clients:
if client.name.strip() == name:
found = client
break
return found
def send_to_list_of_people(self, people, msg):
"""
This method sends a message to a list of people.
Args:
people (list): list of clients
msg (str): The message to be sent
"""
        # Currently not used. If I decide to add groups
# to the app, then I will use this method.
for client in people:
self._send_msg(client, msg)
def data_received(self, data):
"""
This method is in charge of receiving the data that
has been sent from the client. The rules for how
this data is dealt with exist here.
Args:
data (byte): The data received over the socket connection
"""
msg = data.decode().strip()
logging.debug("data_received: {}".format(msg))
if msg == "/disconnect":
self.send_to_everyone("---> {} left the room".format(self.name),
format=False)
self.transport.close()
logging.info("command: /quit")
elif msg == "/whoami":
logging.info("command: /whoami")
self._send_to_self("You are {}\n".format(self.name))
self._send_to_self("Description: {}\n".format(
self.description))
elif msg == "/people":
logging.info("command: /people")
people = [client for client in clients if client != self]
if not people:
self._send_to_self("****No one else is in the room....*****")
for index, client in enumerate(people):
self._send_to_self("{}: {}\n".format(index, client.name))
elif msg == "/chatroom":
logging.info("command: /chatroom")
self._send_to_self("Chatroom name: {}".format(
self.chatroom_name))
elif msg == "/help":
logging.info("command: /help")
self._send_to_self("{}".format(help_text.HELP_GENERAL))
elif msg.startswith("/whois "):
if len(msg.split(' ')) >= 2:
command, name = msg.split(' ', 1)
logging.info("command: {}\Args: {}".format(
command, name))
found = self.find_client_by_name(name.strip())
if found:
self._send_to_self('Name: {}\nDescription: {}'.format(
found.name, found.description))
else:
self._send_to_self("I don't know")
else:
self._send_to_self(help_text.HELP_WHOIS)
elif msg.startswith("/msg "):
            if len(msg.split(' ')) >= 2 and ',' in msg:
args = msg.split(' ', 1)[1]
name, direct_msg = args.split(',', 1)
logging.info("command: /msg-{}, {}".format(name, direct_msg))
found = self.find_client_by_name(name.strip())
if found:
direct_msg = ''.join(direct_msg.strip())
self._send_msg(found, "*{}".format(direct_msg))
self._send_to_self('msg sent')
else:
logging.debug("Not Found: {}".format(name))
self._send_to_self('Could not find {}'.format(name))
else:
self._send_to_self(help_text.HELP_MSG)
elif msg.startswith("/help "):
command_args = msg.split(' ')[:2]
logging.info("command: {}".format(command_args))
error_msg = "{} is not a valid command".format(command_args[1])
msg = help_text.HELP_DICT.get(command_args[1], error_msg)
self._send_to_self(msg)
elif msg.startswith("/set "):
command_args = msg.strip().split(' ')
logging.info("command: {}\n".format(command_args))
key, value = None, None
if len(command_args) >= 3 and\
command_args[1] in ['name', 'description']:
key, *value = command_args[1:]
if key == 'name':
name = ' '.join(value)
if self._unique_name(name):
logging.debug('setting name to {}'.format(value))
self.name = name
self._send_to_self("Name: {}".format(self.name))
else:
self._send_to_self(
"The name you selected is all ready in use."
"\nPlease select another name.")
elif key == 'description':
logging.debug('setting description to {}'.format(value))
self.description = ' '.join(value)
self._send_to_self("Description: {}".format(
self.description))
else:
self._send_to_self(help_text.HELP_SET)
elif msg.startswith("/CLIENT**: USER LIST"):
logging.debug("/CLIENT**: USER LIST")
user_list = [client.name for client in clients]
self._send_to_self(",".join(user_list), client=True)
else:
self.send_to_everyone(msg)
def connection_lost(self, ex):
"""
        This method fires when the connection between
        the client and server is lost.
        Args:
            ex (Exception or None): the exception that closed the connection, or None if it closed cleanly
"""
logging.info("connection_lost: {}".format(self.peername))
clients.remove(self)
def cli_parser():
"""
This function contains the logic for the command line
parser.
"""
chat_server = argparse.ArgumentParser(
description=help_text.CLI.get('description'),
epilog=help_text.CLI.get('epilog'))
chat_server.add_argument(
"--host",
type=str,
default="localhost",
help=help_text.CLI.get('host'))
chat_server.add_argument(
"--port",
type=int,
default=3333,
help=help_text.CLI.get('port'))
chat_server.add_argument(
"--name",
type=str,
default="Chat Room",
help=help_text.CLI.get('name'))
return chat_server
def run_server(host, port, name):
"""
This function is charge of running the server.
Args:
host (str): host name/ip address
port (int): port to which the app will run on
name (str): the name of the chatroom
"""
logging.info("starting up..")
print("Server running on {}:{}".format(host, port))
host = "127.0.0.1" if host == "localhost" else host
loop = asyncio.get_event_loop()
coro = loop.create_server(lambda: SimpleChatClientProtocol(name),
port=port, host=host)
server = loop.run_until_complete(coro)
for socket in server.sockets:
logging.info("serving on {}".format(socket.getsockname()))
loop.run_forever()
def main():
"""
This function contains the logic for the logger
and is in charge of running this application.
"""
logging.basicConfig(
filename="server_log",
filemode="w",
level=logging.DEBUG,
format='%(asctime)s--%(levelname)a--%(funcName)s--%(name)s:%(message)s'
)
cli_args = cli_parser().parse_args()
run_server(cli_args.host, cli_args.port, cli_args.name)
if __name__ == '__main__':
cli_args = cli_parser()
test = cli_args.parse_args()
main()
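# --- Hedged usage sketch (host/port values are illustrative) ---
# Start the server on all interfaces with a custom room name:
#   python server.py --host 0.0.0.0 --port 3333 --name "My Room"
# Connect with any line-based TCP client and use the built-in commands:
#   nc localhost 3333
#   /help
#   /set name alice
#   /msg bob, hello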
| mit | 894,940,125,265,857,700 | 32.881306 | 79 | 0.532055 | false |
jonwright/ImageD11 | ImageD11/sparseframe.py | 1 | 10124 |
from __future__ import print_function, division
import time, sys
import h5py, scipy.sparse, numpy as np, pylab as pl
from ImageD11 import cImageD11
# see also sandbox/harvest_pixels.py
NAMES = {
"filename" : "original filename used to create a sparse frame",
"intensity" : "corrected pixel values",
"nlabel": "Number of unique labels for an image labelling",
"threshold" : "Cut off used for thresholding",
}
class sparse_frame( object ):
"""
Indices / shape mapping
"""
def __init__(self, row, col, shape, itype=np.uint16, pixels=None):
""" row = slow direction
col = fast direction
shape = size of full image
itype = the integer type to store the indices
our c codes currently use unsigned short...
nnz is implicit as len(row)==len(col)
pixels = numpy arrays in a dict to name them
throw in a ary.attrs if you want to save some
"""
self.check( row, col, shape, itype )
self.shape = shape
self.row = np.asarray(row, dtype = itype )
self.col = np.asarray(col, dtype = itype )
self.nnz = len(self.row)
# Things we could have using those indices:
# raw pixel intensities
# corrected intensities
# smoothed pixel intensities
# labelling via different algorithms
self.pixels = {}
self.meta = {}
if pixels is not None:
for name, val in pixels.items():
assert len(val) == self.nnz
self.pixels[name] = val
def check(self, row, col, shape, itype):
""" Ensure the index data makes sense and fits """
lo = np.iinfo(itype).min
hi = np.iinfo(itype).max
assert len(shape) == 2
assert shape[0] >= lo and shape[0] < hi
assert shape[1] >= lo and shape[1] < hi
assert np.min(row) >= lo and np.max(row) < hi
assert np.min(col) >= lo and np.max(col) < hi
assert len(row) == len(col)
def is_sorted(self):
""" Tests whether the data are sorted into slow/fast order
rows are slow direction
columns are fast """
# TODO: non uint16 cases
assert self.row.dtype == np.uint16 and \
cImageD11.sparse_is_sorted( self.row, self.col ) == 0
def to_dense(self, data=None, out=None):
""" returns the full 2D image
data = name in self.pixels or 1D array matching self.nnz
Does not handle repeated indices
e.g. obj.to_dense( obj.pixels['raw_intensity'] )
"""
if data in self.pixels:
data = self.pixels[data] # give back this array
else:
ks = list( self.pixels.keys() )
if len(ks)==1:
data = self.pixels[ks[0]] # default for only one
else:
data = np.ones( self.nnz, np.bool ) # give a mask
if out is None:
out = np.zeros( self.shape, data.dtype )
else:
assert out.shape == self.shape
assert len(data) == self.nnz
adr = self.row.astype(np.intp) * self.shape[1] + self.col
out.flat[adr] = data
return out
def mask( self, msk ):
""" returns a subset of itself """
spf = sparse_frame( self.row[msk],
self.col[msk],
self.shape, self.row.dtype )
for name, px in self.pixels.items():
if name in self.meta:
m = self.meta[name].copy()
else:
m = None
spf.set_pixels( name, px[msk], meta = m )
return spf
def set_pixels( self, name, values, meta=None ):
""" Named arrays sharing these labels """
assert len(values) == self.nnz
self.pixels[name] = values
if meta is not None:
self.meta[name] = meta
def sort_by( self, name ):
""" Not sure when you would do this. For sorting
by a peak labelling to get pixels per peak """
assert name in self.pixels
order = np.argsort( self.pixels[name] )
self.reorder( self, order )
def sort( self ):
""" Puts you into slow / fast looping order """
order = np.lexsort( ( self.col, self.row ) )
self.reorder( self, order )
def reorder( self, order ):
""" Put the pixels into a different order (in place) """
assert len(order) == self.nnz
self.row[:] = self.row[order]
self.col[:] = self.col[order]
for name, px in self.pixels.items():
px[:] = px[order]
def threshold(self, threshold, name='intensity'):
"""
returns a new sparse frame with pixels > threshold
"""
return self.mask( self.pixels[name] > threshold )
def to_hdf_group( frame, group ):
""" Save a 2D sparse frame to a hdf group
Makes 1 single frame per group
"""
itype = np.dtype( frame.row.dtype )
meta = { "itype" : itype.name,
"shape0" : frame.shape[0],
"shape1" : frame.shape[1] }
for name, value in meta.items():
group.attrs[name] = value
opts = { "compression": "lzf",
"shuffle" : True,
}
#opts = {}
group.require_dataset( "row", shape=(frame.nnz,),
dtype=itype, **opts )
group.require_dataset( "col", shape=(frame.nnz,),
dtype=itype, **opts )
group['row'][:] = frame.row
group['col'][:] = frame.col
for pxname, px in frame.pixels.items():
group.require_dataset( pxname, shape=(frame.nnz,),
dtype=px.dtype,
**opts )
group[pxname][:] = px
        if pxname in frame.meta:
            group[pxname].attrs.update( dict( frame.meta[pxname] ) )
def from_data_mask( mask, data, header ):
"""
Create a sparse from a dense array
"""
assert mask.shape == data.shape
# using uint16 here - perhaps make this general in the future
# ... but not for now
assert data.shape[0] < pow(2,16)-1
assert data.shape[1] < pow(2,16)-1
nnz = (mask>0).sum()
tmp = np.empty( data.shape[0],'i') # tmp hold px per row cumsums
row = np.empty( nnz, np.uint16 )
col = np.empty( nnz, np.uint16 )
cImageD11.mask_to_coo( mask, row, col, tmp )
intensity = data[ mask > 0 ]
# intensity.attrs = dict(header) # FIXME USE xarray ?
spf = sparse_frame( row, col, data.shape, itype=np.uint16 )
spf.set_pixels( "intensity" , intensity, dict( header ) )
return spf
def from_hdf_group( group ):
itype = np.dtype( group.attrs['itype'] )
shape = group.attrs['shape0'], group.attrs['shape1']
row = group['row'][:] # read it
col = group['col'][:]
spf = sparse_frame( row, col, shape, itype=itype )
for pxname in list(group):
if pxname in ["row", "col"]:
continue
data = group[pxname][:]
header = dict( group[pxname].attrs )
spf.set_pixels( pxname, data, header )
return spf
def sparse_moments( frame, intensity_name, labels_name ):
""" We rely on a labelling array carrying nlabel metadata (==labels.data.max())"""
nl = frame.meta[ labels_name ][ "nlabel" ]
return cImageD11.sparse_blob2Dproperties(
frame.pixels[intensity_name],
frame.row,
frame.col,
frame.pixels[labels_name],
nl )
def overlaps(frame1, labels1, frame2, labels2):
"""
figures out which label of self matches which label of other
Assumes the zero label does not exist (background)
Returns sparse array of:
label in self (row)
label in other (col)
number of shared pixels (data)
"""
ki = np.empty( frame1.nnz, 'i' )
kj = np.empty( frame2.nnz, 'i' )
npx = cImageD11.sparse_overlaps( frame1.row, frame1.col, ki,
frame2.row, frame2.col, kj)
# self.data and other.data filled during init
row = frame1.pixels[labels1][ ki[:npx] ] # my labels
col = frame2.pixels[labels2][ kj[:npx] ] # your labels
ect = np.empty( npx, 'i') # ect = counts of overlaps
tj = np.empty( npx, 'i') # tj = temporary for sorting
n1 = frame1.meta[labels1][ "nlabel" ]
n2 = frame2.meta[labels2][ "nlabel" ]
tmp = np.empty( max(n1, n2)+1, 'i') # for histogram
nedge = cImageD11.compress_duplicates( row, col, ect, tj, tmp )
# overwrites row/col in place : ignore the zero label (hope it is not there)
crow = row[:nedge]-1
ccol = col[:nedge]-1
cdata = ect[:nedge]
cedges = scipy.sparse.coo_matrix( ( cdata, (crow, ccol)), shape=(n1, n2) )
# really?
return cedges
def sparse_connected_pixels( frame,
label_name="connectedpixels",
data_name="intensity",
threshold=None ):
"""
frame = a sparse frame
label_name = the array to save labels to in that frame
data_name = an array in that frame
threshold = float value or take data.threshold
"""
labels = np.zeros( frame.nnz, "i" )
if threshold is None:
threshold = frame.meta[data_name]["threshold"]
nlabel = cImageD11.sparse_connectedpixels(
frame.pixels[data_name], frame.row, frame.col,
threshold, labels )
frame.set_pixels( label_name, labels, { 'nlabel' : nlabel } )
return nlabel
def sparse_localmax( frame,
label_name="localmax",
data_name = "intensity" ):
labels = np.zeros( frame.nnz, "i" )
vmx = np.zeros( frame.nnz, np.float32 )
imx = np.zeros( frame.nnz, 'i')
nlabel = cImageD11.sparse_localmaxlabel(
frame.pixels[data_name], frame.row, frame.col,
vmx, imx, labels )
frame.set_pixels( label_name, labels, { "nlabel" : nlabel } )
return nlabel
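# --- Hedged usage sketch (threshold and image shape are illustrative; the mask
# dtype may need casting depending on cImageD11.mask_to_coo's requirements) ---
# import numpy as np
# img = np.random.poisson(1, (256, 256)).astype(np.float32)
# msk = img > 2                                     # keep pixels above a cut
# spf = from_data_mask(msk, img, header={"threshold": 2})
# nlabel = sparse_connected_pixels(spf, threshold=2)
# props = sparse_moments(spf, "intensity", "connectedpixels")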
| gpl-2.0 | 6,007,533,981,669,690,000 | 34.398601 | 86 | 0.55077 | false |
nksheridan/elephantAI | test_Deter_as_Server_and_Play_Audio.py | 1 | 1414 | # DETER DEVICE
# this is test code for putting the deter device into server mode, and getting a message via bluetooth from the detection device, and
# then going ahead and playing scare sounds. You need to determine your MAC address. It is for the server in this case, so the MAC address
# of the deter device. You also need to pair the deter device with the detection device via Bluetooth prior to using this. You can do
# that from the Bluetooth icon in the Raspian GUI.
import socket
import time
import os
import random
hostMACaddress = 'xxx'
port = 9
backlog = 1
size = 1024
s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
s.bind((hostMACaddress, port))
s.listen(backlog)
print("We are waiting for a message from the detection device to arrive via bluetooth!")
try:
client, address = s.accept()
data = client.recv(size)
if data:
print(data)
client.send(data)
#echo back
except:
print("closing the socket")
client.close()
s.close()
message = str(data)
#convert the data received to a string
print(message)
if message == "b'yes_audio'":
print("play scare sounds now")
time.sleep(3)
    scare_sounds = ['aplay bees1.wav', 'aplay bees2.wav', 'aplay bees3.wav']
i = 0
while i <10:
i = i+1
to_play = random.choice(scare_sounds)
print(to_play)
os.system(to_play)
print("Finished scare. Now can message detection device, and await another message from it")
| mit | 6,251,672,956,260,858,000 | 27.857143 | 138 | 0.737624 | false |
tyler-cromwell/Acid | client.py | 1 | 2439 | #!/usr/bin/python3
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
The MIT License (MIT)
Copyright (c) 2016 Tyler Cromwell
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import getopt
import readline
import socket
import sys
"""
Readline settings
"""
readline.parse_and_bind('tab: complete')
"""
Connection settings
"""
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_ip = '10.0.0.20'
client_port = 8888
try:
opts, args = getopt.getopt(sys.argv[1:], 'i:p:', ['ipaddress=', 'port='])
""" Process command line arguments """
for o, a in opts:
if o == '-i' or o == '--ipaddress':
client_ip = a
elif o == '-p' or o == '--port':
client_port = int(a)
""" One-time send """
if len(sys.argv) > 1:
message = ''
for i in range(1, len(sys.argv)):
message += sys.argv[i]
if i < (len(sys.argv)-1):
message += ' '
client.sendto(message.encode('utf-8'), (client_ip, client_port))
""" Loop for message """
while len(sys.argv) >= 1:
user_input = input('UDP> ')
if user_input == 'quit' or user_input == 'exit':
break
client.sendto(user_input.encode('utf-8'), (client_ip, client_port))
except EOFError:
print()
except KeyboardInterrupt:
print()
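# --- Hedged usage sketch ---
# Interactive session against the default target (10.0.0.20:8888):
#   python client.py
#   UDP> hello
#   UDP> quit
# The -i/--ipaddress and -p/--port options change the target address and port.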
| mit | -3,025,028,006,687,815,700 | 30.675325 | 80 | 0.621976 | false |
xenserver/auto-cert-kit | autocertkit/status.py | 1 | 3793 | #!/usr/bin/python
# Copyright (c) Citrix Systems Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""Module for checking the status of the kit. This will be of most interest
when the kit has rebooted in order to change its backend, allowing automated clients
to keep track of progress."""
from test_report import *
import utils
import models
import os
TEST_FILE = "test_run.conf"
DEFAULT_RUN_LEVEL = 3
running = False
def get_process_strings():
ps = subprocess.Popen(
['ps', 'aux'], stdout=subprocess.PIPE).communicate()[0]
process_strings = []
for line in ps.split('\n'):
if 'ack_cli.py' in line or 'test_runner.py' in line:
process_strings.append(line)
return process_strings
def check_for_process():
process_strings = get_process_strings()
my_pid = str(os.getpid())
for line in process_strings:
if my_pid in line:
process_strings.remove(line)
if process_strings:
return True
def get_run_level():
output = subprocess.Popen(
['/sbin/runlevel'], stdout=subprocess.PIPE).communicate()[0]
_, level = output.split()
return int(level)
def main():
running = False
uptime_seconds = utils.os_uptime()
# Check for manifest file
if not os.path.exists(TEST_FILE):
print "4:Manifest file has not been created. Have run the kit? (Has an error occured?)"
sys.exit(0)
# Check for the python process
if check_for_process():
running = True
# Check the XML file to find out how many tests have been run
try:
ack_run = models.parse_xml(TEST_FILE)
except:
print "5:An error has occured reading. %s" % TEST_FILE
sys.exit(1)
p, f, s, w, r = ack_run.get_status()
if w+r == 0:
print "0:Finished (Passed:%d, Failed:%d, Skipped:%d)" % (p, f, s)
elif not running and uptime_seconds <= 600 and r > 0:
print "3:Server rebooting... (Passed:%d, Failed:%d, Skipped:%d, Waiting:%d, Running:%d)" % (
p, f, s, w, r)
elif not running and uptime_seconds > 600:
print "1:Process not running. An error has occurred. (Passed:%d, Failed:%d, Skipped: %d, Waiting:%d, Running:%d)" % (
p, f, s, w, r)
sys.exit(1)
else:
perc = float(p + f + s) / float(w + r + p + f + s) * 100
print "2:Running - %d%% Complete (Passed:%d, Failed:%d, Skipped:%d, Waiting:%d, Running:%d)" % (
perc, p, f, s, w, r)
if __name__ == "__main__":
main()
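# --- Hedged usage sketch (the output line is illustrative) ---
#   python status.py
#   2:Running - 40% Complete (Passed:4, Failed:0, Skipped:0, Waiting:5, Running:1)
# The leading digit encodes the state: 0 finished, 1 error, 2 running,
# 3 rebooting, 4 no manifest yet, 5 manifest unreadable.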
| bsd-2-clause | 7,740,039,314,793,611,000 | 32.866071 | 125 | 0.660691 | false |
metwit/django-fulmine | fulmine/forms.py | 1 | 2406 | from django import forms
from django.core.exceptions import ValidationError
from fulmine.models import parse_scope
class SeparatedValuesField(forms.CharField):
def __init__(self, *args, **kwargs):
self.separator = kwargs.pop('separator', ' ')
super(SeparatedValuesField, self).__init__(*args, **kwargs)
def clean(self, value):
if not value:
return []
return value.split(self.separator)
class AuthorizationForm(forms.Form):
response_type = forms.ChoiceField(
choices=[('code', 'code'), ('token', 'token')])
client_id = forms.CharField()
redirect_uri = forms.CharField(required=False)
scope = SeparatedValuesField(required=False)
state = forms.CharField(required=False)
def clean_scope(self):
scope = self.cleaned_data['scope']
return parse_scope(scope)
def clean_scope(form):
scope = form.cleaned_data['scope']
return parse_scope(scope)
def make_token_form(grant_type, required_fields=[], optional_fields=[],
django_fields={}):
class_dict = dict()
for field_name in optional_fields:
if field_name == 'scope':
field = SeparatedValuesField(required=False)
else:
field = forms.CharField(required=False)
class_dict[field_name] = field
for field_name in required_fields:
if field_name == 'scope':
field = SeparatedValuesField(required=True)
else:
field = forms.CharField(required=True)
class_dict[field_name] = field
for field_name, field in django_fields.iteritems():
class_dict[field_name] = field
class_dict['clean_scope'] = clean_scope
cls = type('%sTokenForm' % grant_type,
(forms.Form, ),
class_dict
)
return cls
AuthorizationCodeTokenForm = make_token_form('authorization_code',
required_fields=[
'code',
],
optional_fields=[
'redirect_uri',
'client_id',
'scope',
]
)
PasswordTokenForm = make_token_form('password',
required_fields=[
'username',
'password',
'scope',
]
)
ClientCredentialsTokenForm = make_token_form('client_credentials',
required_fields=['scope'],
)
RefreshTokenTokenForm = make_token_form('refresh_token',
required_fields=['refresh_token'],
optional_fields=['scope']
)
| bsd-3-clause | -1,758,042,214,765,443,600 | 24.870968 | 71 | 0.618869 | false |
gitcoinco/web | app/marketing/management/commands/no_applicants_email.py | 1 | 1922 | '''
Copyright (C) 2021 Gitcoin Core
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.utils import timezone
from dashboard.models import Bounty
from marketing.mails import no_applicant_reminder
class Command(BaseCommand):
help = 'sends reminder emails to funders whose bounties have 0 applications'
def handle(self, *args, **options):
if settings.DEBUG:
print("not active in non prod environments")
return
start_time_3_days = timezone.now() - timezone.timedelta(hours=24 * 3)
end_time_3_days = timezone.now() - timezone.timedelta(hours=24 * 4)
start_time_7_days = timezone.now() - timezone.timedelta(hours=24 * 7)
end_time_7_days = timezone.now() - timezone.timedelta(hours=24 * 8)
bounties = Bounty.objects.current().filter(
(Q(created_on__range=[end_time_3_days, start_time_3_days]) | Q(created_on__range=[end_time_7_days, start_time_7_days])),
idx_status='open',
network='mainnet'
)
for bounty in [b for b in bounties if b.no_of_applicants == 0]:
no_applicant_reminder(bounty.bounty_owner_email, bounty)
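# Hedged note: as a Django management command this is typically invoked via
# manage.py, e.g. "./manage.py no_applicants_email" (invocation shown for
# illustration only).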
| agpl-3.0 | 4,106,764,754,830,823,400 | 39.041667 | 132 | 0.689386 | false |
kwikteam/klusta | klusta/kwik/tests/test_model.py | 1 | 12258 | # -*- coding: utf-8 -*-
"""Tests of Kwik file opening routines."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os.path as op
import numpy as np
from numpy.testing import assert_array_equal as ae
from pytest import raises
from ...utils import captured_logging
from ..creator import create_kwik
from ..mea import MEA, staggered_positions
from ..mock import create_mock_kwik
from ..model import (KwikModel,
_list_channel_groups,
_list_channels,
_list_recordings,
_list_clusterings,
_concatenate_spikes,
)
#------------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------------
_N_CLUSTERS = 10
_N_SPIKES = 100
_N_CHANNELS = 28
_N_FETS = 2
_N_SAMPLES_TRACES = 10000
def test_kwik_utility(tempdir):
channels = list(range(_N_CHANNELS))
# Create the test HDF5 file in the temporary directory.
filename = create_mock_kwik(tempdir,
n_clusters=_N_CLUSTERS,
n_spikes=_N_SPIKES,
n_channels=_N_CHANNELS,
n_features_per_channel=_N_FETS,
n_samples_traces=_N_SAMPLES_TRACES)
model = KwikModel(filename)
model._kwik.open()
assert _list_channel_groups(model._kwik.h5py_file) == [1]
assert _list_recordings(model._kwik.h5py_file) == [0, 1]
assert _list_clusterings(model._kwik.h5py_file, 1) == ['main',
'original',
]
assert _list_channels(model._kwik.h5py_file, 1) == channels
def test_concatenate_spikes():
spikes = [2, 3, 5, 0, 11, 1]
recs = [0, 0, 0, 1, 1, 2]
offsets = [0, 7, 100]
concat = _concatenate_spikes(spikes, recs, offsets)
ae(concat, [2, 3, 5, 7, 18, 101])
def test_kwik_empty(tempdir):
channels = [0, 3, 1]
graph = [[0, 3], [1, 0]]
probe = {'channel_groups': {
0: {'channels': channels,
'graph': graph,
'geometry': {0: (10, 10)},
}}}
sample_rate = 20000
kwik_path = op.join(tempdir, 'test.kwik')
create_kwik(kwik_path=kwik_path, probe=probe, sample_rate=sample_rate)
model = KwikModel(kwik_path)
ae(model.channels, sorted(channels))
ae(model.channel_order, channels)
assert model.sample_rate == sample_rate
assert model.n_channels == 3
assert model.spike_samples is None
assert model.n_spikes == 0
assert model.n_clusters == 0
model.describe()
def test_kwik_open_full(tempdir):
# Create the test HDF5 file in the temporary directory.
filename = create_mock_kwik(tempdir,
n_clusters=_N_CLUSTERS,
n_spikes=_N_SPIKES,
n_channels=_N_CHANNELS,
n_features_per_channel=_N_FETS,
n_samples_traces=_N_SAMPLES_TRACES)
with raises(ValueError):
KwikModel()
# NOTE: n_channels - 2 because we use a special channel order.
nc = _N_CHANNELS - 2
# Test implicit open() method.
kwik = KwikModel(filename)
kwik.describe()
kwik.metadata
ae(kwik.channels, np.arange(_N_CHANNELS))
assert kwik.n_channels == _N_CHANNELS
assert kwik.n_spikes == _N_SPIKES
ae(kwik.channel_order, np.arange(1, _N_CHANNELS - 1)[::-1])
assert kwik.spike_samples.shape == (_N_SPIKES,)
assert kwik.spike_samples.dtype == np.int64
# Make sure the spike samples are increasing, even with multiple
# recordings.
# WARNING: need to cast to int64, otherwise negative values will
# overflow and be positive, making the test pass while the
# spike samples are *not* increasing!
assert np.all(np.diff(kwik.spike_samples.astype(np.int64)) >= 0)
assert kwik.spike_times.shape == (_N_SPIKES,)
assert kwik.spike_times.dtype == np.float64
assert kwik.spike_recordings.shape == (_N_SPIKES,)
assert kwik.spike_recordings.dtype == np.uint16
assert kwik.spike_clusters.shape == (_N_SPIKES,)
assert kwik.spike_clusters.min() in (0, 1, 2)
    assert kwik.spike_clusters.max() in (_N_CLUSTERS - 2, _N_CLUSTERS - 1)
assert kwik.all_features.shape == (_N_SPIKES, nc, _N_FETS)
kwik.all_features[0, ...]
assert kwik.all_masks.shape == (_N_SPIKES, nc)
assert kwik.all_traces.shape == (_N_SAMPLES_TRACES, _N_CHANNELS)
assert kwik.all_waveforms[0].shape == (1, 40, nc)
assert kwik.all_waveforms[-1].shape == (1, 40, nc)
assert kwik.all_waveforms[-10].shape == (1, 40, nc)
assert kwik.all_waveforms[10].shape == (1, 40, nc)
assert kwik.all_waveforms[[10, 20]].shape == (2, 40, nc)
with raises(IndexError):
kwik.all_waveforms[_N_SPIKES + 10]
with raises(ValueError):
kwik.clustering = 'foo'
with raises(ValueError):
kwik.channel_group = 42
assert kwik.n_recordings == 2
# Test probe.
assert isinstance(kwik.probe, MEA)
assert kwik.probe.positions.shape == (nc, 2)
ae(kwik.probe.positions, staggered_positions(_N_CHANNELS)[1:-1][::-1])
kwik.close()
def test_kwik_open_no_kwx(tempdir):
# Create the test HDF5 file in the temporary directory.
filename = create_mock_kwik(tempdir,
n_clusters=_N_CLUSTERS,
n_spikes=_N_SPIKES,
n_channels=_N_CHANNELS,
n_features_per_channel=_N_FETS,
n_samples_traces=_N_SAMPLES_TRACES,
with_kwx=False)
# Test implicit open() method.
kwik = KwikModel(filename)
kwik.close()
def test_kwik_open_no_kwd(tempdir):
# Create the test HDF5 file in the temporary directory.
filename = create_mock_kwik(tempdir,
n_clusters=_N_CLUSTERS,
n_spikes=_N_SPIKES,
n_channels=_N_CHANNELS,
n_features_per_channel=_N_FETS,
n_samples_traces=_N_SAMPLES_TRACES,
with_kwd=False)
# Test implicit open() method.
kwik = KwikModel(filename)
with captured_logging() as buf:
kwik.all_waveforms[:]
# Enusure that there is no error message.
assert not buf.getvalue().strip()
kwik.close()
def test_kwik_save(tempdir):
# Create the test HDF5 file in the temporary directory.
filename = create_mock_kwik(tempdir,
n_clusters=_N_CLUSTERS,
n_spikes=_N_SPIKES,
n_channels=_N_CHANNELS,
n_features_per_channel=_N_FETS,
n_samples_traces=_N_SAMPLES_TRACES)
kwik = KwikModel(filename)
cluster_groups = kwik.cluster_groups
sc_0 = kwik.spike_clusters.copy()
sc_1 = sc_0.copy()
new_cluster = _N_CLUSTERS + 10
sc_1[_N_SPIKES // 2:] = new_cluster
ae(kwik.spike_clusters, sc_0)
kwik.add_cluster_group(4, 'new')
cluster_groups[new_cluster] = 'new'
assert kwik.cluster_metadata[new_cluster] == 'unsorted'
kwik.save(sc_1, cluster_groups, {'test': (1, 2.)})
ae(kwik.spike_clusters, sc_1)
assert kwik.cluster_metadata[new_cluster] == 'new'
kwik.close()
kwik = KwikModel(filename)
ae(kwik.spike_clusters, sc_1)
assert kwik.cluster_metadata[new_cluster] == 'new'
ae(kwik.clustering_metadata['test'], [1, 2])
def test_kwik_clusterings(tempdir):
# Create the test HDF5 file in the temporary directory.
filename = create_mock_kwik(tempdir,
n_clusters=_N_CLUSTERS,
n_spikes=_N_SPIKES,
n_channels=_N_CHANNELS,
n_features_per_channel=_N_FETS,
n_samples_traces=_N_SAMPLES_TRACES)
kwik = KwikModel(filename)
assert kwik.clusterings == ['main', 'original']
# The default clustering is 'main'.
assert kwik.n_spikes == _N_SPIKES
assert kwik.n_clusters == _N_CLUSTERS
ae(kwik.cluster_ids, np.arange(_N_CLUSTERS))
# Change clustering.
kwik.clustering = 'original'
n_clu = kwik.n_clusters
assert kwik.n_spikes == _N_SPIKES
# Some clusters may be empty with a small number of spikes like here
assert _N_CLUSTERS * 2 - 4 <= n_clu <= _N_CLUSTERS * 2
assert len(kwik.cluster_ids) == n_clu
def test_kwik_manage_clusterings(tempdir):
# Create the test HDF5 file in the temporary directory.
filename = create_mock_kwik(tempdir,
n_clusters=_N_CLUSTERS,
n_spikes=_N_SPIKES,
n_channels=_N_CHANNELS,
n_features_per_channel=_N_FETS,
n_samples_traces=_N_SAMPLES_TRACES)
kwik = KwikModel(filename)
spike_clusters = kwik.spike_clusters
assert kwik.clusterings == ['main', 'original']
# Test renaming.
kwik.clustering = 'original'
with raises(ValueError):
kwik.rename_clustering('a', 'b')
with raises(ValueError):
kwik.rename_clustering('original', 'b')
with raises(ValueError):
kwik.rename_clustering('main', 'original')
kwik.clustering = 'main'
kwik.rename_clustering('original', 'original_2')
assert kwik.clusterings == ['main', 'original_2']
with raises(ValueError):
kwik.clustering = 'original'
kwik.clustering = 'original_2'
n_clu = kwik.n_clusters
assert len(kwik.cluster_ids) == n_clu
# Test copy.
with raises(ValueError):
kwik.copy_clustering('a', 'b')
with raises(ValueError):
kwik.copy_clustering('original', 'b')
with raises(ValueError):
kwik.copy_clustering('main', 'original_2')
# You cannot move the current clustering, but you can copy it.
with raises(ValueError):
kwik.rename_clustering('original_2', 'original_2_copy')
kwik.copy_clustering('original_2', 'original_2_copy')
kwik.delete_clustering('original_2_copy')
kwik.clustering = 'main'
kwik.copy_clustering('original_2', 'original')
assert kwik.clusterings == ['main', 'original', 'original_2']
kwik.clustering = 'original'
ci = kwik.cluster_ids
kwik.clustering = 'original_2'
ae(kwik.cluster_ids, ci)
# Test delete.
with raises(ValueError):
kwik.delete_clustering('a')
kwik.delete_clustering('original')
kwik.clustering = 'main'
kwik.delete_clustering('original_2')
assert kwik.clusterings == ['main', 'original']
# Test add.
sc = np.ones(_N_SPIKES, dtype=np.int32)
sc[1] = sc[-2] = 3
kwik.add_clustering('new', sc)
ae(kwik.spike_clusters, spike_clusters)
kwik.clustering = 'new'
ae(kwik.spike_clusters, sc)
assert kwik.n_clusters == 2
ae(kwik.cluster_ids, [1, 3])
def test_kwik_manage_cluster_groups(tempdir):
# Create the test HDF5 file in the temporary directory.
filename = create_mock_kwik(tempdir,
n_clusters=_N_CLUSTERS,
n_spikes=_N_SPIKES,
n_channels=_N_CHANNELS,
n_features_per_channel=_N_FETS,
n_samples_traces=_N_SAMPLES_TRACES)
kwik = KwikModel(filename)
with raises(ValueError):
kwik.delete_cluster_group(2)
with raises(ValueError):
kwik.add_cluster_group(1, 'new')
with raises(ValueError):
kwik.rename_cluster_group(1, 'renamed')
kwik.add_cluster_group(4, 'new')
kwik.rename_cluster_group(4, 'renamed')
kwik.delete_cluster_group(4)
with raises(ValueError):
kwik.delete_cluster_group(4)
| bsd-3-clause | 1,594,561,297,690,360,600 | 32.400545 | 79 | 0.556535 | false |
MikeLing/treeherder | treeherder/log_parser/parsers.py | 1 | 19774 | import json
import logging
import re
from HTMLParser import HTMLParser
import jsonschema
from django.conf import settings
from treeherder.etl.buildbot import RESULT_DICT
logger = logging.getLogger(__name__)
class ParserBase(object):
"""
Base class for all parsers.
"""
def __init__(self, name):
"""Setup the artifact to hold the extracted data."""
self.name = name
self.clear()
def clear(self):
"""Reset this parser's values for another run."""
self.artifact = []
self.complete = False
def parse_line(self, line, lineno):
"""Parse a single line of the log"""
raise NotImplementedError # pragma no cover
def finish_parse(self, last_lineno_seen):
"""Clean-up/summary tasks run at the end of parsing."""
pass
def get_artifact(self):
"""By default, just return the artifact as-is."""
return self.artifact
class StepParser(ParserBase):
"""
Parse out individual job steps within a log.
Step format:
"steps": [
{
"errors": [],
"name": "set props: master", # the name of the process on start line
"started": "2013-06-05 12:39:57.838527",
"started_linenumber": 8,
"finished_linenumber": 10,
"finished": "2013-06-05 12:39:57.839226",
"result": 0
},
...
]
"""
# Matches the half-dozen 'key: value' header lines printed at the start of each
# Buildbot job log. The list of keys are taken from:
# https://hg.mozilla.org/build/buildbotcustom/file/644c3860300a/bin/log_uploader.py#l126
RE_HEADER_LINE = re.compile(r'(?:builder|slave|starttime|results|buildid|builduid|revision): .*')
# Step marker lines, eg:
# ========= Started foo (results: 0, elapsed: 0 secs) (at 2015-08-17 02:33:56.353866) =========
# ========= Finished foo (results: 0, elapsed: 0 secs) (at 2015-08-17 02:33:56.354301) =========
RE_STEP_MARKER = re.compile(r'={9} (?P<marker_type>Started|Finished) (?P<name>.*?) '
r'\(results: (?P<result_code>\d+), elapsed: .*?\) '
r'\(at (?P<timestamp>.*?)\)')
STATES = {
# The initial state until we record the first step.
"awaiting_first_step": 0,
# We've started a step, but not yet seen the end of it.
"step_in_progress": 1,
# We've seen the end of the previous step.
"step_finished": 2,
}
# date format in a step started/finished header
DATE_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
def __init__(self):
"""Setup the artifact to hold the header lines."""
super(StepParser, self).__init__("step_data")
self.stepnum = -1
self.artifact = {
"steps": [],
"errors_truncated": False
}
self.sub_parser = ErrorParser()
self.state = self.STATES['awaiting_first_step']
def parse_line(self, line, lineno):
"""Parse a single line of the log.
We have to handle both buildbot style logs as well as Taskcluster logs. The latter
attempt to emulate the buildbot logs, but don't accurately do so, partly due
to the way logs are generated in Taskcluster (ie: on the workers themselves).
Buildbot logs:
builder: ...
slave: ...
starttime: ...
results: ...
buildid: ...
builduid: ...
revision: ...
======= <step START marker> =======
<step log output>
======= <step FINISH marker> =======
======= <step START marker> =======
<step log output>
======= <step FINISH marker> =======
Taskcluster logs (a worst-case example):
<log output outside a step>
======= <step START marker> =======
<step log output>
======= <step FINISH marker> =======
<log output outside a step>
======= <step START marker> =======
<step log output with no following finish marker>
As can be seen above, Taskcluster logs can have (a) log output that falls between
step markers, and (b) content at the end of the log, that is not followed by a
final finish step marker. We handle this by creating generic placeholder steps to
hold the log output that is not enclosed by step markers, and then by cleaning up
the final step in finish_parse() once all lines have been parsed.
"""
if not line.strip():
# Skip whitespace-only lines, since they will never contain an error line,
# so are not of interest. This also avoids creating spurious unnamed steps
# (which occurs when we find content outside of step markers) for the
# newlines that separate the steps in Buildbot logs.
return
if self.state == self.STATES['awaiting_first_step'] and self.RE_HEADER_LINE.match(line):
# The "key: value" job metadata header lines that appear at the top of
# Buildbot logs would result in the creation of an unnamed step at the
# start of the job, unless we skip them. (Which is not desired, since
# the lines are metadata and not test/build output.)
return
step_marker_match = self.RE_STEP_MARKER.match(line)
if not step_marker_match:
# This is a normal log line, rather than a step marker. (The common case.)
if self.state != self.STATES['step_in_progress']:
# We don't have an in-progress step, so need to start one, even though this
# isn't a "step started" marker line. We therefore create a new generic step,
# since we have no way of finding out the step metadata. This case occurs
# for the Taskcluster logs where content can fall between step markers.
self.start_step(lineno)
# Parse the line for errors, which if found, will be associated with the current step.
self.sub_parser.parse_line(line, lineno)
return
# This is either a "step started" or "step finished" marker line, eg:
# ========= Started foo (results: 0, elapsed: 0 secs) (at 2015-08-17 02:33:56.353866) =========
# ========= Finished foo (results: 0, elapsed: 0 secs) (at 2015-08-17 02:33:56.354301) =========
if step_marker_match.group('marker_type') == 'Started':
if self.state == self.STATES['step_in_progress']:
# We're partway through a step (ie: haven't seen a "step finished" marker line),
# but have now reached the "step started" marker for the next step. Before we
# can start the new step, we have to clean up the previous one - albeit using
# generic step metadata, since there was no "step finished" marker. This occurs
# in Taskcluster's logs when content falls between the step marker lines.
self.end_step(lineno)
# Start a new step using the extracted step metadata.
self.start_step(lineno,
name=step_marker_match.group('name'),
timestamp=step_marker_match.group('timestamp'))
return
# This is a "step finished" marker line.
if self.state != self.STATES['step_in_progress']:
# We're not in the middle of a step, so can't finish one. Just ignore the marker line.
return
# Close out the current step using the extracted step metadata.
self.end_step(lineno,
timestamp=step_marker_match.group('timestamp'),
result_code=int(step_marker_match.group('result_code')))
def start_step(self, lineno, name="Unnamed step", timestamp=None):
"""Create a new step and update the state to reflect we're now in the middle of a step."""
self.state = self.STATES['step_in_progress']
self.stepnum += 1
self.steps.append({
"name": name,
"started": timestamp,
"started_linenumber": lineno,
"errors": [],
})
def end_step(self, lineno, timestamp=None, result_code=None):
"""Fill in the current step's summary and update the state to show the current step has ended."""
self.state = self.STATES['step_finished']
step_errors = self.sub_parser.get_artifact()
step_error_count = len(step_errors)
if step_error_count > settings.PARSER_MAX_STEP_ERROR_LINES:
step_errors = step_errors[:settings.PARSER_MAX_STEP_ERROR_LINES]
self.artifact["errors_truncated"] = True
self.current_step.update({
"finished": timestamp,
"finished_linenumber": lineno,
# Whilst the result code is present on both the start and end buildbot-style step
# markers, for Taskcluster logs the start marker line lies about the result, since
# the log output is unbuffered, so Taskcluster does not know the real result at
# that point. As such, we only set the result when ending a step.
"result": RESULT_DICT.get(result_code, "unknown"),
"errors": step_errors
})
# reset the sub_parser for the next step
self.sub_parser.clear()
def finish_parse(self, last_lineno_seen):
"""Clean-up/summary tasks run at the end of parsing."""
if self.state == self.STATES['step_in_progress']:
# We've reached the end of the log without seeing the final "step finish"
# marker, which would normally have triggered updating the step. As such we
# must manually close out the current step, so things like result, finish
# time are set for it. This ensures that the error summary for Taskcluster
# infra failures actually lists the error that occurs at the
# end of the log.
self.end_step(last_lineno_seen)
@property
def steps(self):
"""Return the list of steps in the artifact"""
return self.artifact["steps"]
@property
def current_step(self):
"""Return the current step in the artifact"""
return self.steps[self.stepnum]
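# --- Hedged usage sketch (log_lines is an assumed list of raw log lines) ---
# parser = StepParser()
# for lineno, line in enumerate(log_lines):
#     parser.parse_line(line, lineno)
# parser.finish_parse(len(log_lines) - 1)
# steps = parser.get_artifact()["steps"]   # one dict per step, with any errors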
class TinderboxPrintParser(ParserBase):
RE_TINDERBOXPRINT = re.compile(r'.*TinderboxPrint: ?(?P<line>.*)$')
RE_UPLOADED_TO = re.compile(
r"<a href=['\"](?P<url>http(s)?://.*)['\"]>(?P<value>.+)</a>: uploaded"
)
RE_LINK_HTML = re.compile(
(r"((?P<title>[A-Za-z/\.0-9\-_ ]+): )?"
r"<a .*href=['\"](?P<url>http(s)?://.+)['\"].*>(?P<value>.+)</a>")
)
RE_LINK_TEXT = re.compile(
r"((?P<title>[A-Za-z/\.0-9\-_ ]+): )?(?P<url>http(s)?://.*)"
)
TINDERBOX_REGEXP_TUPLE = (
{
're': RE_UPLOADED_TO,
'base_dict': {
"content_type": "link",
"title": "artifact uploaded"
},
'duplicates_fields': {}
},
{
're': RE_LINK_HTML,
'base_dict': {
"content_type": "link"
},
'duplicates_fields': {}
},
{
're': RE_LINK_TEXT,
'base_dict': {
"content_type": "link"
},
'duplicates_fields': {'value': 'url'}
}
)
def __init__(self):
"""Setup the artifact to hold the job details."""
super(TinderboxPrintParser, self).__init__("job_details")
def parse_line(self, line, lineno):
"""Parse a single line of the log"""
match = self.RE_TINDERBOXPRINT.match(line) if line else None
if match:
line = match.group('line')
for regexp_item in self.TINDERBOX_REGEXP_TUPLE:
match = regexp_item['re'].match(line)
if match:
artifact = match.groupdict()
# handle duplicate fields
for to_field, from_field in regexp_item['duplicates_fields'].items():
                    # if to_field is not present or is None, copy from from_field
if to_field not in artifact or artifact[to_field] is None:
artifact[to_field] = artifact[from_field]
artifact.update(regexp_item['base_dict'])
self.artifact.append(artifact)
return
# default case: consider it html content
# try to detect title/value splitting on <br/>
artifact = {"content_type": "raw_html", }
if "<br/>" in line:
title, value = line.split("<br/>", 1)
artifact["title"] = title
artifact["value"] = value
# or similar long lines if they contain a url
elif "href" in line and "title" in line:
def parse_url_line(line_data):
class TpLineParser(HTMLParser):
def handle_starttag(self, tag, attrs):
d = dict(attrs)
artifact["url"] = d['href']
artifact["title"] = d['title']
def handle_data(self, data):
artifact["value"] = data
p = TpLineParser()
p.feed(line_data)
p.close()
            # strip ^M carriage returns from Windows lines, otherwise
            # handle_data will yield no 'value' data
parse_url_line(line.replace('\r', ''))
else:
artifact["value"] = line
self.artifact.append(artifact)
class ErrorParser(ParserBase):
"""A generic error detection sub-parser"""
IN_SEARCH_TERMS = (
"TEST-UNEXPECTED-",
"fatal error",
"FATAL ERROR",
"REFTEST ERROR",
"PROCESS-CRASH",
"Assertion failure:",
"Assertion failed:",
"###!!! ABORT:",
"E/GeckoLinker",
"SUMMARY: AddressSanitizer",
"SUMMARY: LeakSanitizer",
"Automation Error:",
"command timed out:",
"wget: unable ",
"TEST-VALGRIND-ERROR",
"[ FAILED ] ",
)
RE_ERR_MATCH = re.compile((
r"^error: TEST FAILED"
r"|^g?make(?:\[\d+\])?: \*\*\*"
r"|^Remote Device Error:"
r"|^[A-Za-z.]+Error: "
r"|^[A-Za-z.]*Exception: "
r"|^remoteFailed:"
r"|^rm: cannot "
r"|^abort:"
r"|^Output exceeded \d+ bytes"
r"|^The web-page 'stop build' button was pressed"
r"|.*\.js: line \d+, col \d+, Error -"
r"|^\[taskcluster\] Error:"
r"|^\[[\w._-]+:(?:error|exception)\]"
))
RE_ERR_SEARCH = re.compile((
r" error\(\d*\):"
r"|:\d+: error:"
r"| error R?C\d*:"
r"|ERROR [45]\d\d:"
r"|mozmake\.(?:exe|EXE)(?:\[\d+\])?: \*\*\*"
))
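    # Illustrative (made-up) examples of lines the expressions above are intended
    # to flag:
    #   RE_ERR_MATCH:  "make[2]: *** [all] Error 2", "IndexError: list index out of range"
    #   RE_ERR_SEARCH: "foo.c:12: error: expected ';' before '}' token"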
RE_EXCLUDE_1_SEARCH = re.compile(r"TEST-(?:INFO|PASS) ")
RE_EXCLUDE_2_SEARCH = re.compile(
r"I[ /](Gecko|Robocop|TestRunner).*TEST-UNEXPECTED-"
r"|^TimeoutException: "
r"|^ImportError: No module named pygtk$"
)
RE_ERR_1_MATCH = re.compile(r"^\d+:\d+:\d+ +(?:ERROR|CRITICAL|FATAL) - ")
# Looks for a leading value inside square brackets containing a "YYYY-"
# year pattern but isn't a TaskCluster error indicator (like
    # ``taskcluster:error``).
#
# This matches the following:
# [task 2016-08-18T17:50:56.955523Z]
# [2016- task]
#
# But not:
# [taskcluster:error]
# [taskcluster:something 2016-]
RE_TASKCLUSTER_NORMAL_PREFIX = re.compile(r"^\[(?!taskcluster:)[^\]]*20\d{2}-[^\]]+\]\s")
RE_MOZHARNESS_PREFIX = re.compile(r"^\d+:\d+:\d+ +(?:DEBUG|INFO|WARNING) - +")
def __init__(self):
"""A simple error detection sub-parser"""
super(ErrorParser, self).__init__("errors")
self.is_taskcluster = False
def add(self, line, lineno):
self.artifact.append({
"linenumber": lineno,
"line": line.rstrip()
})
def parse_line(self, line, lineno):
"""Check a single line for an error. Keeps track of the linenumber"""
# TaskCluster logs are a bit wonky.
#
# TaskCluster logs begin with output coming from TaskCluster itself,
# before it has transitioned control of the task to the configured
# process. These "internal" logs look like the following:
#
# [taskcluster 2016-09-09 17:41:43.544Z] Worker Group: us-west-2b
#
# If an error occurs during this "setup" phase, TaskCluster may emit
# lines beginning with ``[taskcluster:error]``.
#
# Once control has transitioned from TaskCluster to the configured
# task process, lines can be whatever the configured process emits.
# The popular ``run-task`` wrapper prefixes output to emulate
# TaskCluster's "internal" logs. e.g.
#
# [vcs 2016-09-09T17:45:02.842230Z] adding changesets
#
# This prefixing can confuse error parsing. So, we strip it.
#
# Because regular expression matching and string manipulation can be
# expensive when performed on every line, we only strip the TaskCluster
# log prefix if we know we're in a TaskCluster log.
# First line of TaskCluster logs almost certainly has this.
if line.startswith('[taskcluster '):
self.is_taskcluster = True
# For performance reasons, only do this if we have identified as
# a TC task.
if self.is_taskcluster:
line = re.sub(self.RE_TASKCLUSTER_NORMAL_PREFIX, "", line)
if self.is_error_line(line):
self.add(line, lineno)
def is_error_line(self, line):
if self.RE_EXCLUDE_1_SEARCH.search(line):
return False
if self.RE_ERR_1_MATCH.match(line):
return True
# Remove mozharness prefixes prior to matching
trimline = re.sub(self.RE_MOZHARNESS_PREFIX, "", line).rstrip()
if self.RE_EXCLUDE_2_SEARCH.search(trimline):
return False
return bool(any(term for term in self.IN_SEARCH_TERMS if term in trimline) or
self.RE_ERR_MATCH.match(trimline) or self.RE_ERR_SEARCH.search(trimline))
class PerformanceParser(ParserBase):
"""a sub-parser to find generic performance data"""
    # Using $ as an end-of-line bound in the regex causes it to fail on
    # Windows logs, most likely because of the ^M (carriage return) that
    # precedes the Windows end of line.
RE_PERFORMANCE = re.compile(r'.*?PERFHERDER_DATA:\s+({.*})')
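    # A made-up example of the kind of line this matches; the JSON captured in
    # group(1) is validated against PERF_SCHEMA below:
    #   PERFHERDER_DATA: {"framework": {"name": "example"}, "suites": []}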
PERF_SCHEMA = json.load(open('schemas/performance-artifact.json'))
def __init__(self):
super(PerformanceParser, self).__init__("performance_data")
def parse_line(self, line, lineno):
match = self.RE_PERFORMANCE.match(line)
if match:
try:
                # avoid shadowing the builtin ``dict``
                perf_data = json.loads(match.group(1))
                jsonschema.validate(perf_data, self.PERF_SCHEMA)
                self.artifact.append(perf_data)
except ValueError:
logger.warning("Unable to parse Perfherder data from line: %s",
line)
except jsonschema.ValidationError as e:
logger.warning("Perfherder line '%s' does not comply with "
"json schema: %s", line, e.message)
# Don't mark the parser as complete, in case there are multiple performance artifacts.
| mpl-2.0 | 743,858,351,912,040,100 | 38.469062 | 105 | 0.557904 | false |
karlwithak/nowradio | nowradio/stationInfoUpdater.py | 1 | 1896 | import requests
import ourUtils
from dbManager import Queries, get_connection
# This program goes through the list of stations in the db and updates information such as
# current listeners, max listeners, peak listeners, and status (up or not).
def worker(id_url_list, connection):
cur = connection.cursor()
for id_ip in id_url_list:
url = "http://" + id_ip[1] + '/7.html'
try:
response = requests.get(url, headers=ourUtils.request_header, timeout=2)
except requests.ConnectionError:
print("connection error: " + url)
cur.execute(Queries.set_station_down, (id_ip[0],))
except requests.Timeout:
print("timeout error : " + url)
cur.execute(Queries.set_station_down, (id_ip[0],))
except Exception:
print("unknown error : " + url)
cur.execute(Queries.set_station_down, (id_ip[0],))
else:
if response.status_code in (200, 304) \
and response.text.count(",") >= 6 \
and len(response.text) < 2048:
info = response.text.split(",")
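                # Assumed Shoutcast /7.html field order (comma separated):
                # current listeners, stream status, peak, max, unique, bitrate,
                # song title -- the indices used below rely on that assumption.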
data = {
                    # info[1] is the stream-status field ("1"/"0"); bool() on a
                    # non-empty string is always True, so compare explicitly.
                    'is_up': info[1] == "1",
'peak': info[2],
'max': info[3],
'active': info[4],
'id': id_ip[0]
}
cur.execute(Queries.update_station_by_id, data)
else:
print("bad response: " + url)
cur.execute(Queries.set_station_down, (id_ip[0],))
cur.close()
def main():
conn = get_connection()
if conn is None:
exit("could not connect to db")
id_url_list = ourUtils.db_quick_query(conn, Queries.get_all_ips)
ourUtils.multi_thread_runner(id_url_list, worker, conn)
conn.commit()
conn.close()
if __name__ == '__main__':
main()
| mit | 3,592,722,863,100,787,000 | 34.111111 | 90 | 0.533228 | false |
dplepage/logbook | tests/test_logbook.py | 1 | 59981 | # -*- coding: utf-8 -*-
from .utils import (
LogbookTestCase,
activate_via_push_pop,
activate_via_with_statement,
capturing_stderr_context,
get_total_delta_seconds,
make_fake_mail_handler,
missing,
require_module,
require_py3,
)
from contextlib import closing, contextmanager
from datetime import datetime, timedelta
from random import randrange
import logbook
from logbook.helpers import StringIO, xrange, iteritems, zip, u
import os
import pickle
import re
import shutil
import socket
import sys
import tempfile
import time
import json
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
__file_without_pyc__ = __file__
if __file_without_pyc__.endswith(".pyc"):
__file_without_pyc__ = __file_without_pyc__[:-1]
LETTERS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
class _BasicAPITestCase(LogbookTestCase):
def test_basic_logging(self):
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
self.log.warn('This is a warning. Nice hah?')
self.assert_(handler.has_warning('This is a warning. Nice hah?'))
self.assertEqual(handler.formatted_records, [
'[WARNING] testlogger: This is a warning. Nice hah?'
])
def test_extradict(self):
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
self.log.warn('Test warning')
record = handler.records[0]
record.extra['existing'] = 'foo'
self.assertEqual(record.extra['nonexisting'], '')
self.assertEqual(record.extra['existing'], 'foo')
self.assertEqual(repr(record.extra),
'ExtraDict({\'existing\': \'foo\'})')
def test_custom_logger(self):
client_ip = '127.0.0.1'
class CustomLogger(logbook.Logger):
def process_record(self, record):
record.extra['ip'] = client_ip
custom_log = CustomLogger('awesome logger')
fmt = '[{record.level_name}] {record.channel}: ' \
'{record.message} [{record.extra[ip]}]'
handler = logbook.TestHandler(format_string=fmt)
self.assertEqual(handler.format_string, fmt)
with self.thread_activation_strategy(handler):
custom_log.warn('Too many sounds')
self.log.warn('"Music" playing')
self.assertEqual(handler.formatted_records, [
'[WARNING] awesome logger: Too many sounds [127.0.0.1]',
'[WARNING] testlogger: "Music" playing []'
])
def test_handler_exception(self):
class ErroringHandler(logbook.TestHandler):
def emit(self, record):
raise RuntimeError('something bad happened')
with capturing_stderr_context() as stderr:
with self.thread_activation_strategy(ErroringHandler()) as handler:
self.log.warn('I warn you.')
self.assert_('something bad happened' in stderr.getvalue())
self.assert_('I warn you' not in stderr.getvalue())
def test_formatting_exception(self):
def make_record():
return logbook.LogRecord('Test Logger', logbook.WARNING,
'Hello {foo:invalid}',
kwargs={'foo': 42},
frame=sys._getframe())
record = make_record()
with self.assertRaises(TypeError) as caught:
record.message
errormsg = str(caught.exception)
self.assertRegexpMatches(errormsg,
"Could not format message with provided arguments: Invalid (?:format specifier)|(?:conversion specification)|(?:format spec)")
self.assertIn("msg='Hello {foo:invalid}'", errormsg)
self.assertIn('args=()', errormsg)
self.assertIn("kwargs={'foo': 42}", errormsg)
self.assertRegexpMatches(
errormsg,
r'Happened in file .*%s, line \d+' % __file_without_pyc__)
def test_exception_catching(self):
logger = logbook.Logger('Test')
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
self.assertFalse(handler.has_error())
try:
1 / 0
except Exception:
logger.exception()
try:
1 / 0
except Exception:
logger.exception('Awesome')
self.assert_(handler.has_error('Uncaught exception occurred'))
self.assert_(handler.has_error('Awesome'))
self.assertIsNotNone(handler.records[0].exc_info)
self.assertIn('1 / 0', handler.records[0].formatted_exception)
def test_exc_info_tuple(self):
self._test_exc_info(as_tuple=True)
def test_exc_info_true(self):
self._test_exc_info(as_tuple=False)
def _test_exc_info(self, as_tuple):
logger = logbook.Logger("Test")
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
try:
1 / 0
except Exception:
exc_info = sys.exc_info()
logger.info("Exception caught", exc_info=exc_info if as_tuple else True)
self.assertIsNotNone(handler.records[0].exc_info)
self.assertEquals(handler.records[0].exc_info, exc_info)
def test_exporting(self):
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
try:
1 / 0
except Exception:
self.log.exception()
record = handler.records[0]
exported = record.to_dict()
record.close()
imported = logbook.LogRecord.from_dict(exported)
for key, value in iteritems(record.__dict__):
if key[0] == '_':
continue
self.assertEqual(value, getattr(imported, key))
def test_pickle(self):
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
try:
1 / 0
except Exception:
self.log.exception()
record = handler.records[0]
record.pull_information()
record.close()
for p in xrange(pickle.HIGHEST_PROTOCOL):
exported = pickle.dumps(record, p)
imported = pickle.loads(exported)
for key, value in iteritems(record.__dict__):
if key[0] == '_':
continue
imported_value = getattr(imported, key)
if isinstance(value, ZeroDivisionError):
# in Python 3.2, ZeroDivisionError(x) != ZeroDivisionError(x)
self.assert_(type(value) is type(imported_value))
self.assertEqual(value.args, imported_value.args)
else:
self.assertEqual(value, imported_value)
def test_timedate_format(self):
"""
tests the logbook.set_datetime_format() function
"""
FORMAT_STRING = '{record.time:%H:%M:%S} {record.message}'
handler = logbook.TestHandler(format_string=FORMAT_STRING)
handler.push_thread()
logbook.set_datetime_format('utc')
try:
self.log.warn('This is a warning.')
time_utc = handler.records[0].time
logbook.set_datetime_format('local')
self.log.warn('This is a warning.')
time_local = handler.records[1].time
finally:
handler.pop_thread()
# put back the default time factory
logbook.set_datetime_format('utc')
# get the expected difference between local and utc time
t1 = datetime.now()
t2 = datetime.utcnow()
tz_minutes_diff = get_total_delta_seconds(t1 - t2)/60.0
if abs(tz_minutes_diff) < 1:
self.skipTest("Cannot test utc/localtime differences if they vary by less than one minute...")
# get the difference between LogRecord local and utc times
logbook_minutes_diff = get_total_delta_seconds(time_local - time_utc)/60.0
self.assertGreater(abs(logbook_minutes_diff), 1, "Localtime does not differ from UTC by more than 1 minute (Local: %s, UTC: %s)" % (time_local, time_utc))
ratio = logbook_minutes_diff / tz_minutes_diff
self.assertGreater(ratio, 0.99)
self.assertLess(ratio, 1.01)
class BasicAPITestCase_Regular(_BasicAPITestCase):
def setUp(self):
super(BasicAPITestCase_Regular, self).setUp()
self.thread_activation_strategy = activate_via_with_statement
class BasicAPITestCase_Contextmgr(_BasicAPITestCase):
def setUp(self):
super(BasicAPITestCase_Contextmgr, self).setUp()
self.thread_activation_strategy = activate_via_push_pop
class _HandlerTestCase(LogbookTestCase):
def setUp(self):
super(_HandlerTestCase, self).setUp()
self.dirname = tempfile.mkdtemp()
self.filename = os.path.join(self.dirname, 'log.tmp')
def tearDown(self):
shutil.rmtree(self.dirname)
super(_HandlerTestCase, self).tearDown()
def test_file_handler(self):
handler = logbook.FileHandler(self.filename,
format_string='{record.level_name}:{record.channel}:'
'{record.message}',)
with self.thread_activation_strategy(handler):
self.log.warn('warning message')
handler.close()
with open(self.filename) as f:
self.assertEqual(f.readline(),
'WARNING:testlogger:warning message\n')
def test_file_handler_unicode(self):
with capturing_stderr_context() as captured:
with self.thread_activation_strategy(logbook.FileHandler(self.filename)) as h:
self.log.info(u('\u0431'))
self.assertFalse(captured.getvalue())
def test_file_handler_delay(self):
handler = logbook.FileHandler(self.filename,
format_string='{record.level_name}:{record.channel}:'
'{record.message}', delay=True)
self.assertFalse(os.path.isfile(self.filename))
with self.thread_activation_strategy(handler):
self.log.warn('warning message')
handler.close()
with open(self.filename) as f:
self.assertEqual(f.readline(),
'WARNING:testlogger:warning message\n')
def test_monitoring_file_handler(self):
if os.name == "nt":
self.skipTest("unsupported on windows due to different IO (also unneeded)")
handler = logbook.MonitoringFileHandler(self.filename,
format_string='{record.level_name}:{record.channel}:'
'{record.message}', delay=True)
with self.thread_activation_strategy(handler):
self.log.warn('warning message')
os.rename(self.filename, self.filename + '.old')
self.log.warn('another warning message')
handler.close()
with open(self.filename) as f:
self.assertEqual(f.read().strip(),
'WARNING:testlogger:another warning message')
def test_custom_formatter(self):
def custom_format(record, handler):
return record.level_name + ':' + record.message
handler = logbook.FileHandler(self.filename)
with self.thread_activation_strategy(handler):
handler.formatter = custom_format
self.log.warn('Custom formatters are awesome')
with open(self.filename) as f:
self.assertEqual(f.readline(),
'WARNING:Custom formatters are awesome\n')
def test_rotating_file_handler(self):
basename = os.path.join(self.dirname, 'rot.log')
handler = logbook.RotatingFileHandler(basename, max_size=2048,
backup_count=3,
)
handler.format_string = '{record.message}'
with self.thread_activation_strategy(handler):
for c, x in zip(LETTERS, xrange(32)):
self.log.warn(c * 256)
files = [x for x in os.listdir(self.dirname)
if x.startswith('rot.log')]
files.sort()
self.assertEqual(files, ['rot.log', 'rot.log.1', 'rot.log.2',
'rot.log.3'])
with open(basename) as f:
self.assertEqual(f.readline().rstrip(), 'C' * 256)
self.assertEqual(f.readline().rstrip(), 'D' * 256)
self.assertEqual(f.readline().rstrip(), 'E' * 256)
self.assertEqual(f.readline().rstrip(), 'F' * 256)
def test_timed_rotating_file_handler(self):
basename = os.path.join(self.dirname, 'trot.log')
handler = logbook.TimedRotatingFileHandler(basename, backup_count=3)
handler.format_string = '[{record.time:%H:%M}] {record.message}'
def fake_record(message, year, month, day, hour=0,
minute=0, second=0):
lr = logbook.LogRecord('Test Logger', logbook.WARNING,
message)
lr.time = datetime(year, month, day, hour, minute, second)
return lr
with self.thread_activation_strategy(handler):
for x in xrange(10):
handler.handle(fake_record('First One', 2010, 1, 5, x + 1))
for x in xrange(20):
handler.handle(fake_record('Second One', 2010, 1, 6, x + 1))
for x in xrange(10):
handler.handle(fake_record('Third One', 2010, 1, 7, x + 1))
for x in xrange(20):
handler.handle(fake_record('Last One', 2010, 1, 8, x + 1))
files = sorted(
x for x in os.listdir(self.dirname) if x.startswith('trot')
)
self.assertEqual(files, ['trot-2010-01-06.log', 'trot-2010-01-07.log',
'trot-2010-01-08.log'])
with open(os.path.join(self.dirname, 'trot-2010-01-08.log')) as f:
self.assertEqual(f.readline().rstrip(), '[01:00] Last One')
self.assertEqual(f.readline().rstrip(), '[02:00] Last One')
with open(os.path.join(self.dirname, 'trot-2010-01-07.log')) as f:
self.assertEqual(f.readline().rstrip(), '[01:00] Third One')
self.assertEqual(f.readline().rstrip(), '[02:00] Third One')
def test_mail_handler(self):
subject = u('\xf8nicode')
handler = make_fake_mail_handler(subject=subject)
with capturing_stderr_context() as fallback:
with self.thread_activation_strategy(handler):
self.log.warn('This is not mailed')
try:
1 / 0
except Exception:
self.log.exception(u('Viva la Espa\xf1a'))
if not handler.mails:
# if sending the mail failed, the reason should be on stderr
self.fail(fallback.getvalue())
self.assertEqual(len(handler.mails), 1)
sender, receivers, mail = handler.mails[0]
mail = mail.replace("\r", "")
self.assertEqual(sender, handler.from_addr)
self.assert_('=?utf-8?q?=C3=B8nicode?=' in mail)
self.assertRegexpMatches(mail, 'Message type:\s+ERROR')
self.assertRegexpMatches(mail, 'Location:.*%s' % __file_without_pyc__)
self.assertRegexpMatches(mail, 'Module:\s+%s' % __name__)
self.assertRegexpMatches(mail, 'Function:\s+test_mail_handler')
body = u('Message:\n\nViva la Espa\xf1a')
if sys.version_info < (3, 0):
body = body.encode('utf-8')
self.assertIn(body, mail)
self.assertIn('\n\nTraceback (most', mail)
self.assertIn('1 / 0', mail)
self.assertIn('This is not mailed', fallback.getvalue())
def test_mail_handler_record_limits(self):
suppression_test = re.compile('This message occurred additional \d+ '
'time\(s\) and was suppressed').search
handler = make_fake_mail_handler(record_limit=1,
record_delta=timedelta(seconds=0.5))
with self.thread_activation_strategy(handler):
later = datetime.utcnow() + timedelta(seconds=1.1)
while datetime.utcnow() < later:
self.log.error('Over and over...')
# first mail that is always delivered + 0.5 seconds * 2
# and 0.1 seconds of room for rounding errors makes 3 mails
self.assertEqual(len(handler.mails), 3)
# first mail is always delivered
self.assert_(not suppression_test(handler.mails[0][2]))
# the next two have a supression count
self.assert_(suppression_test(handler.mails[1][2]))
self.assert_(suppression_test(handler.mails[2][2]))
def test_mail_handler_batching(self):
mail_handler = make_fake_mail_handler()
handler = logbook.FingersCrossedHandler(mail_handler, reset=True)
with self.thread_activation_strategy(handler):
self.log.warn('Testing')
self.log.debug('Even more')
self.log.error('And this triggers it')
self.log.info('Aha')
self.log.error('And this triggers it again!')
self.assertEqual(len(mail_handler.mails), 2)
mail = mail_handler.mails[0][2]
pieces = mail.split('Log records that led up to this one:')
self.assertEqual(len(pieces), 2)
body, rest = pieces
rest = rest.replace("\r", "")
self.assertRegexpMatches(body, 'Message type:\s+ERROR')
self.assertRegexpMatches(body, 'Module:\s+%s' % __name__)
self.assertRegexpMatches(body, 'Function:\s+test_mail_handler_batching')
related = rest.strip().split('\n\n')
self.assertEqual(len(related), 2)
self.assertRegexpMatches(related[0], 'Message type:\s+WARNING')
self.assertRegexpMatches(related[1], 'Message type:\s+DEBUG')
self.assertIn('And this triggers it again', mail_handler.mails[1][2])
def test_group_handler_mail_combo(self):
mail_handler = make_fake_mail_handler(level=logbook.DEBUG)
handler = logbook.GroupHandler(mail_handler)
with self.thread_activation_strategy(handler):
self.log.error('The other way round')
self.log.warn('Testing')
self.log.debug('Even more')
self.assertEqual(mail_handler.mails, [])
self.assertEqual(len(mail_handler.mails), 1)
mail = mail_handler.mails[0][2]
pieces = mail.split('Other log records in the same group:')
self.assertEqual(len(pieces), 2)
body, rest = pieces
rest = rest.replace("\r", "")
self.assertRegexpMatches(body, 'Message type:\s+ERROR')
self.assertRegexpMatches(body, 'Module:\s+'+__name__)
self.assertRegexpMatches(body, 'Function:\s+test_group_handler_mail_combo')
related = rest.strip().split('\n\n')
self.assertEqual(len(related), 2)
self.assertRegexpMatches(related[0], 'Message type:\s+WARNING')
self.assertRegexpMatches(related[1], 'Message type:\s+DEBUG')
def test_syslog_handler(self):
to_test = [
(socket.AF_INET, ('127.0.0.1', 0)),
]
if hasattr(socket, 'AF_UNIX'):
to_test.append((socket.AF_UNIX, self.filename))
for sock_family, address in to_test:
with closing(socket.socket(sock_family, socket.SOCK_DGRAM)) as inc:
inc.bind(address)
inc.settimeout(1)
for app_name in [None, 'Testing']:
handler = logbook.SyslogHandler(app_name, inc.getsockname())
with self.thread_activation_strategy(handler):
self.log.warn('Syslog is weird')
try:
rv = inc.recvfrom(1024)[0]
except socket.error:
self.fail('got timeout on socket')
self.assertEqual(rv, (
u('<12>%stestlogger: Syslog is weird\x00') %
(app_name and app_name + u(':') or u(''))).encode('utf-8'))
def test_handler_processors(self):
handler = make_fake_mail_handler(format_string='''\
Subject: Application Error for {record.extra[path]} [{record.extra[method]}]
Message type: {record.level_name}
Location: {record.filename}:{record.lineno}
Module: {record.module}
Function: {record.func_name}
Time: {record.time:%Y-%m-%d %H:%M:%S}
Remote IP: {record.extra[ip]}
Request: {record.extra[path]} [{record.extra[method]}]
Message:
{record.message}
''')
class Request(object):
remote_addr = '127.0.0.1'
method = 'GET'
path = '/index.html'
def handle_request(request):
def inject_extra(record):
record.extra['ip'] = request.remote_addr
record.extra['method'] = request.method
record.extra['path'] = request.path
processor = logbook.Processor(inject_extra)
with self.thread_activation_strategy(processor):
handler.push_thread()
try:
try:
1 / 0
except Exception:
self.log.exception('Exception happened during request')
finally:
handler.pop_thread()
handle_request(Request())
self.assertEqual(len(handler.mails), 1)
mail = handler.mails[0][2]
self.assertIn('Subject: Application Error '
'for /index.html [GET]', mail)
self.assertIn('1 / 0', mail)
def test_regex_matching(self):
test_handler = logbook.TestHandler()
with self.thread_activation_strategy(test_handler):
self.log.warn('Hello World!')
self.assert_(test_handler.has_warning(re.compile('^Hello')))
self.assert_(not test_handler.has_warning(re.compile('world$')))
self.assert_(not test_handler.has_warning('^Hello World'))
def test_custom_handling_test(self):
class MyTestHandler(logbook.TestHandler):
def handle(self, record):
if record.extra.get('flag') != 'testing':
return False
return logbook.TestHandler.handle(self, record)
class MyLogger(logbook.Logger):
def process_record(self, record):
logbook.Logger.process_record(self, record)
record.extra['flag'] = 'testing'
log = MyLogger()
handler = MyTestHandler()
with capturing_stderr_context() as captured:
with self.thread_activation_strategy(handler):
log.warn('From my logger')
self.log.warn('From another logger')
self.assert_(handler.has_warning('From my logger'))
self.assertIn('From another logger', captured.getvalue())
def test_custom_handling_tester(self):
flag = True
class MyTestHandler(logbook.TestHandler):
def should_handle(self, record):
return flag
null_handler = logbook.NullHandler()
with self.thread_activation_strategy(null_handler):
test_handler = MyTestHandler()
with self.thread_activation_strategy(test_handler):
self.log.warn('1')
flag = False
self.log.warn('2')
self.assert_(test_handler.has_warning('1'))
self.assert_(not test_handler.has_warning('2'))
def test_null_handler(self):
with capturing_stderr_context() as captured:
with self.thread_activation_strategy(logbook.NullHandler()) as null_handler:
with self.thread_activation_strategy(logbook.TestHandler(level='ERROR')) as handler:
self.log.error('An error')
self.log.warn('A warning')
self.assertEqual(captured.getvalue(), '')
self.assertFalse(handler.has_warning('A warning'))
self.assert_(handler.has_error('An error'))
def test_test_handler_cache(self):
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
self.log.warn('First line')
self.assertEqual(len(handler.formatted_records),1)
cache = handler.formatted_records # store cache, to make sure it is identifiable
self.assertEqual(len(handler.formatted_records),1)
self.assert_(cache is handler.formatted_records) # Make sure cache is not invalidated without changes to record
self.log.warn('Second line invalidates cache')
self.assertEqual(len(handler.formatted_records),2)
self.assertFalse(cache is handler.formatted_records) # Make sure cache is invalidated when records change
def test_blackhole_setting(self):
null_handler = logbook.NullHandler()
heavy_init = logbook.LogRecord.heavy_init
with self.thread_activation_strategy(null_handler):
def new_heavy_init(self):
raise RuntimeError('should not be triggered')
logbook.LogRecord.heavy_init = new_heavy_init
try:
with self.thread_activation_strategy(null_handler):
logbook.warn('Awesome')
finally:
logbook.LogRecord.heavy_init = heavy_init
null_handler.bubble = True
with capturing_stderr_context() as captured:
            logbook.warning('Not a blackhole')
self.assertNotEqual(captured.getvalue(), '')
def test_calling_frame(self):
handler = logbook.TestHandler()
with self.thread_activation_strategy(handler):
logbook.warn('test')
self.assertEqual(handler.records[0].calling_frame, sys._getframe())
def test_nested_setups(self):
with capturing_stderr_context() as captured:
logger = logbook.Logger('App')
test_handler = logbook.TestHandler(level='WARNING')
mail_handler = make_fake_mail_handler(bubble=True)
handlers = logbook.NestedSetup([
logbook.NullHandler(),
test_handler,
mail_handler
])
with self.thread_activation_strategy(handlers):
logger.warn('This is a warning')
logger.error('This is also a mail')
try:
1 / 0
except Exception:
logger.exception()
logger.warn('And here we go straight back to stderr')
self.assert_(test_handler.has_warning('This is a warning'))
self.assert_(test_handler.has_error('This is also a mail'))
self.assertEqual(len(mail_handler.mails), 2)
self.assertIn('This is also a mail', mail_handler.mails[0][2])
self.assertIn('1 / 0',mail_handler.mails[1][2])
self.assertIn('And here we go straight back to stderr',
captured.getvalue())
with self.thread_activation_strategy(handlers):
logger.warn('threadbound warning')
handlers.push_application()
try:
logger.warn('applicationbound warning')
finally:
handlers.pop_application()
def test_dispatcher(self):
logger = logbook.Logger('App')
with self.thread_activation_strategy(logbook.TestHandler()) as test_handler:
logger.warn('Logbook is too awesome for stdlib')
self.assertEqual(test_handler.records[0].dispatcher, logger)
def test_filtering(self):
logger1 = logbook.Logger('Logger1')
logger2 = logbook.Logger('Logger2')
handler = logbook.TestHandler()
outer_handler = logbook.TestHandler()
def only_1(record, handler):
return record.dispatcher is logger1
handler.filter = only_1
with self.thread_activation_strategy(outer_handler):
with self.thread_activation_strategy(handler):
logger1.warn('foo')
logger2.warn('bar')
self.assert_(handler.has_warning('foo', channel='Logger1'))
self.assertFalse(handler.has_warning('bar', channel='Logger2'))
self.assertFalse(outer_handler.has_warning('foo', channel='Logger1'))
self.assert_(outer_handler.has_warning('bar', channel='Logger2'))
def test_null_handler_filtering(self):
logger1 = logbook.Logger("1")
logger2 = logbook.Logger("2")
outer = logbook.TestHandler()
inner = logbook.NullHandler()
inner.filter = lambda record, handler: record.dispatcher is logger1
with self.thread_activation_strategy(outer):
with self.thread_activation_strategy(inner):
logger1.warn("1")
logger2.warn("2")
self.assertTrue(outer.has_warning("2", channel="2"))
self.assertFalse(outer.has_warning("1", channel="1"))
def test_different_context_pushing(self):
h1 = logbook.TestHandler(level=logbook.DEBUG)
h2 = logbook.TestHandler(level=logbook.INFO)
h3 = logbook.TestHandler(level=logbook.WARNING)
logger = logbook.Logger('Testing')
with self.thread_activation_strategy(h1):
with self.thread_activation_strategy(h2):
with self.thread_activation_strategy(h3):
logger.warn('Wuuu')
logger.info('still awesome')
logger.debug('puzzled')
self.assert_(h1.has_debug('puzzled'))
self.assert_(h2.has_info('still awesome'))
self.assert_(h3.has_warning('Wuuu'))
for handler in h1, h2, h3:
self.assertEquals(len(handler.records), 1)
def test_global_functions(self):
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
logbook.debug('a debug message')
logbook.info('an info message')
logbook.warn('warning part 1')
logbook.warning('warning part 2')
logbook.notice('notice')
logbook.error('an error')
logbook.critical('pretty critical')
logbook.log(logbook.CRITICAL, 'critical too')
self.assert_(handler.has_debug('a debug message'))
self.assert_(handler.has_info('an info message'))
self.assert_(handler.has_warning('warning part 1'))
self.assert_(handler.has_warning('warning part 2'))
self.assert_(handler.has_notice('notice'))
self.assert_(handler.has_error('an error'))
self.assert_(handler.has_critical('pretty critical'))
self.assert_(handler.has_critical('critical too'))
self.assertEqual(handler.records[0].channel, 'Generic')
self.assertIsNone(handler.records[0].dispatcher)
def test_fingerscrossed(self):
handler = logbook.FingersCrossedHandler(logbook.default_handler,
logbook.WARNING)
# if no warning occurs, the infos are not logged
with self.thread_activation_strategy(handler):
with capturing_stderr_context() as captured:
self.log.info('some info')
self.assertEqual(captured.getvalue(), '')
self.assert_(not handler.triggered)
# but if it does, all log messages are output
with self.thread_activation_strategy(handler):
with capturing_stderr_context() as captured:
self.log.info('some info')
self.log.warning('something happened')
self.log.info('something else happened')
logs = captured.getvalue()
self.assert_('some info' in logs)
self.assert_('something happened' in logs)
self.assert_('something else happened' in logs)
self.assert_(handler.triggered)
def test_fingerscrossed_factory(self):
handlers = []
def handler_factory(record, fch):
handler = logbook.TestHandler()
handlers.append(handler)
return handler
def make_fch():
return logbook.FingersCrossedHandler(handler_factory,
logbook.WARNING)
fch = make_fch()
with self.thread_activation_strategy(fch):
self.log.info('some info')
self.assertEqual(len(handlers), 0)
self.log.warning('a warning')
self.assertEqual(len(handlers), 1)
self.log.error('an error')
self.assertEqual(len(handlers), 1)
self.assert_(handlers[0].has_infos)
self.assert_(handlers[0].has_warnings)
self.assert_(handlers[0].has_errors)
self.assert_(not handlers[0].has_notices)
self.assert_(not handlers[0].has_criticals)
self.assert_(not handlers[0].has_debugs)
fch = make_fch()
with self.thread_activation_strategy(fch):
self.log.info('some info')
self.log.warning('a warning')
self.assertEqual(len(handlers), 2)
def test_fingerscrossed_buffer_size(self):
logger = logbook.Logger('Test')
test_handler = logbook.TestHandler()
handler = logbook.FingersCrossedHandler(test_handler, buffer_size=3)
with self.thread_activation_strategy(handler):
logger.info('Never gonna give you up')
logger.warn('Aha!')
logger.warn('Moar!')
logger.error('Pure hate!')
self.assertEqual(test_handler.formatted_records, [
'[WARNING] Test: Aha!',
'[WARNING] Test: Moar!',
'[ERROR] Test: Pure hate!'
])
class HandlerTestCase_Regular(_HandlerTestCase):
def setUp(self):
super(HandlerTestCase_Regular, self).setUp()
self.thread_activation_strategy = activate_via_push_pop
class HandlerTestCase_Contextmgr(_HandlerTestCase):
def setUp(self):
super(HandlerTestCase_Contextmgr, self).setUp()
self.thread_activation_strategy = activate_via_with_statement
class AttributeTestCase(LogbookTestCase):
def test_level_properties(self):
self.assertEqual(self.log.level, logbook.NOTSET)
self.assertEqual(self.log.level_name, 'NOTSET')
self.log.level_name = 'WARNING'
self.assertEqual(self.log.level, logbook.WARNING)
self.log.level = logbook.ERROR
self.assertEqual(self.log.level_name, 'ERROR')
def test_reflected_properties(self):
group = logbook.LoggerGroup()
group.add_logger(self.log)
self.assertEqual(self.log.group, group)
group.level = logbook.ERROR
self.assertEqual(self.log.level, logbook.ERROR)
self.assertEqual(self.log.level_name, 'ERROR')
group.level = logbook.WARNING
self.assertEqual(self.log.level, logbook.WARNING)
self.assertEqual(self.log.level_name, 'WARNING')
self.log.level = logbook.CRITICAL
group.level = logbook.DEBUG
self.assertEqual(self.log.level, logbook.CRITICAL)
self.assertEqual(self.log.level_name, 'CRITICAL')
group.remove_logger(self.log)
self.assertEqual(self.log.group, None)
class LevelLookupTest(LogbookTestCase):
def test_level_lookup_failures(self):
with self.assertRaises(LookupError):
logbook.get_level_name(37)
with self.assertRaises(LookupError):
logbook.lookup_level('FOO')
class FlagsTestCase(LogbookTestCase):
def test_error_flag(self):
with capturing_stderr_context() as captured:
with logbook.Flags(errors='print'):
with logbook.Flags(errors='silent'):
self.log.warn('Foo {42}', 'aha')
self.assertEqual(captured.getvalue(), '')
with logbook.Flags(errors='silent'):
with logbook.Flags(errors='print'):
self.log.warn('Foo {42}', 'aha')
self.assertNotEqual(captured.getvalue(), '')
with self.assertRaises(Exception) as caught:
with logbook.Flags(errors='raise'):
self.log.warn('Foo {42}', 'aha')
self.assertIn('Could not format message with provided '
'arguments', str(caught.exception))
def test_disable_introspection(self):
with logbook.Flags(introspection=False):
with logbook.TestHandler() as h:
self.log.warn('Testing')
self.assertIsNone(h.records[0].frame)
self.assertIsNone(h.records[0].calling_frame)
self.assertIsNone(h.records[0].module)
class LoggerGroupTestCase(LogbookTestCase):
def test_groups(self):
def inject_extra(record):
record.extra['foo'] = 'bar'
group = logbook.LoggerGroup(processor=inject_extra)
group.level = logbook.ERROR
group.add_logger(self.log)
with logbook.TestHandler() as handler:
self.log.warn('A warning')
self.log.error('An error')
self.assertFalse(handler.has_warning('A warning'))
self.assertTrue(handler.has_error('An error'))
self.assertEqual(handler.records[0].extra['foo'], 'bar')
class DefaultConfigurationTestCase(LogbookTestCase):
def test_default_handlers(self):
with capturing_stderr_context() as stream:
self.log.warn('Aha!')
captured = stream.getvalue()
self.assertIn('WARNING: testlogger: Aha!', captured)
class LoggingCompatTestCase(LogbookTestCase):
def test_basic_compat_with_level_setting(self):
self._test_basic_compat(True)
def test_basic_compat_without_level_setting(self):
self._test_basic_compat(False)
def _test_basic_compat(self, set_root_logger_level):
import logging
from logbook.compat import redirected_logging
# mimic the default logging setting
self.addCleanup(logging.root.setLevel, logging.root.level)
logging.root.setLevel(logging.WARNING)
name = 'test_logbook-%d' % randrange(1 << 32)
logger = logging.getLogger(name)
with logbook.TestHandler(bubble=True) as handler:
with capturing_stderr_context() as captured:
with redirected_logging(set_root_logger_level):
logger.debug('This is from the old system')
logger.info('This is from the old system')
logger.warn('This is from the old system')
logger.error('This is from the old system')
logger.critical('This is from the old system')
self.assertIn(('WARNING: %s: This is from the old system' % name),
captured.getvalue())
if set_root_logger_level:
self.assertEquals(handler.records[0].level, logbook.DEBUG)
else:
self.assertEquals(handler.records[0].level, logbook.WARNING)
def test_redirect_logbook(self):
import logging
from logbook.compat import LoggingHandler
out = StringIO()
logger = logging.getLogger()
old_handlers = logger.handlers[:]
handler = logging.StreamHandler(out)
handler.setFormatter(logging.Formatter(
'%(name)s:%(levelname)s:%(message)s'))
logger.handlers[:] = [handler]
try:
with logbook.compat.LoggingHandler() as logging_handler:
self.log.warn("This goes to logging")
pieces = out.getvalue().strip().split(':')
self.assertEqual(pieces, [
'testlogger',
'WARNING',
'This goes to logging'
])
finally:
logger.handlers[:] = old_handlers
class WarningsCompatTestCase(LogbookTestCase):
def test_warning_redirections(self):
from logbook.compat import redirected_warnings
with logbook.TestHandler() as handler:
redirector = redirected_warnings()
redirector.start()
try:
from warnings import warn
warn(RuntimeWarning('Testing'))
finally:
redirector.end()
self.assertEqual(len(handler.records), 1)
self.assertEqual('[WARNING] RuntimeWarning: Testing',
handler.formatted_records[0])
self.assertIn(__file_without_pyc__, handler.records[0].filename)
class MoreTestCase(LogbookTestCase):
@contextmanager
def _get_temporary_file_context(self):
fn = tempfile.mktemp()
try:
yield fn
finally:
try:
os.remove(fn)
except OSError:
pass
@require_module('jinja2')
def test_jinja_formatter(self):
from logbook.more import JinjaFormatter
fmter = JinjaFormatter('{{ record.channel }}/{{ record.level_name }}')
handler = logbook.TestHandler()
handler.formatter = fmter
with handler:
self.log.info('info')
self.assertIn('testlogger/INFO', handler.formatted_records)
@missing('jinja2')
def test_missing_jinja2(self):
from logbook.more import JinjaFormatter
# check the RuntimeError is raised
with self.assertRaises(RuntimeError):
JinjaFormatter('dummy')
def test_colorizing_support(self):
from logbook.more import ColorizedStderrHandler
class TestColorizingHandler(ColorizedStderrHandler):
def should_colorize(self, record):
return True
stream = StringIO()
with TestColorizingHandler(format_string='{record.message}') as handler:
self.log.error('An error')
self.log.warn('A warning')
self.log.debug('A debug message')
lines = handler.stream.getvalue().rstrip('\n').splitlines()
self.assertEqual(lines, [
'\x1b[31;01mAn error',
'\x1b[39;49;00m\x1b[33;01mA warning',
'\x1b[39;49;00m\x1b[37mA debug message',
'\x1b[39;49;00m'
])
def test_tagged(self):
from logbook.more import TaggingLogger, TaggingHandler
stream = StringIO()
second_handler = logbook.StreamHandler(stream)
logger = TaggingLogger('name', ['cmd'])
handler = TaggingHandler(dict(
info=logbook.default_handler,
cmd=second_handler,
both=[logbook.default_handler, second_handler],
))
handler.bubble = False
with handler:
with capturing_stderr_context() as captured:
logger.log('info', 'info message')
logger.log('both', 'all message')
logger.cmd('cmd message')
stderr = captured.getvalue()
self.assertIn('info message', stderr)
self.assertIn('all message', stderr)
self.assertNotIn('cmd message', stderr)
stringio = stream.getvalue()
self.assertNotIn('info message', stringio)
self.assertIn('all message', stringio)
self.assertIn('cmd message', stringio)
def test_external_application_handler(self):
from logbook.more import ExternalApplicationHandler as Handler
with self._get_temporary_file_context() as fn:
handler = Handler([sys.executable, '-c', r'''if 1:
f = open(%(tempfile)s, 'w')
try:
f.write('{record.message}\n')
finally:
f.close()
''' % {'tempfile': repr(fn)}])
with handler:
self.log.error('this is a really bad idea')
with open(fn, 'r') as rf:
contents = rf.read().strip()
self.assertEqual(contents, 'this is a really bad idea')
def test_external_application_handler_stdin(self):
from logbook.more import ExternalApplicationHandler as Handler
with self._get_temporary_file_context() as fn:
handler = Handler([sys.executable, '-c', r'''if 1:
import sys
f = open(%(tempfile)s, 'w')
try:
f.write(sys.stdin.read())
finally:
f.close()
''' % {'tempfile': repr(fn)}], '{record.message}\n')
with handler:
self.log.error('this is a really bad idea')
with open(fn, 'r') as rf:
contents = rf.read().strip()
self.assertEqual(contents, 'this is a really bad idea')
def test_exception_handler(self):
from logbook.more import ExceptionHandler
with ExceptionHandler(ValueError) as exception_handler:
with self.assertRaises(ValueError) as caught:
self.log.info('here i am')
self.assertIn('INFO: testlogger: here i am', caught.exception.args[0])
def test_exception_handler_specific_level(self):
from logbook.more import ExceptionHandler
with logbook.TestHandler() as test_handler:
with self.assertRaises(ValueError) as caught:
with ExceptionHandler(ValueError, level='WARNING') as exception_handler:
self.log.info('this is irrelevant')
self.log.warn('here i am')
self.assertIn('WARNING: testlogger: here i am', caught.exception.args[0])
self.assertIn('this is irrelevant', test_handler.records[0].message)
def test_dedup_handler(self):
from logbook.more import DedupHandler
with logbook.TestHandler() as test_handler:
with DedupHandler():
self.log.info('foo')
self.log.info('bar')
self.log.info('foo')
self.assertEqual(2, len(test_handler.records))
self.assertIn('message repeated 2 times: foo', test_handler.records[0].message)
self.assertIn('message repeated 1 times: bar', test_handler.records[1].message)
class QueuesTestCase(LogbookTestCase):
def _get_zeromq(self, multi=False):
from logbook.queues import ZeroMQHandler, ZeroMQSubscriber
# Get an unused port
tempsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tempsock.bind(('localhost', 0))
host, unused_port = tempsock.getsockname()
tempsock.close()
# Retrieve the ZeroMQ handler and subscriber
uri = 'tcp://%s:%d' % (host, unused_port)
if multi:
handler = [ZeroMQHandler(uri, multi=True) for _ in range(3)]
else:
handler = ZeroMQHandler(uri)
subscriber = ZeroMQSubscriber(uri, multi=multi)
# Enough time to start
time.sleep(0.1)
return handler, subscriber
@require_module('zmq')
def test_zeromq_handler(self):
tests = [
u('Logging something'),
u('Something with umlauts äöü'),
u('Something else for good measure'),
]
handler, subscriber = self._get_zeromq()
for test in tests:
with handler:
self.log.warn(test)
record = subscriber.recv()
self.assertEqual(record.message, test)
self.assertEqual(record.channel, self.log.name)
@require_module('zmq')
def test_multi_zeromq_handler(self):
tests = [
u('Logging something'),
u('Something with umlauts äöü'),
u('Something else for good measure'),
]
handlers, subscriber = self._get_zeromq(multi=True)
for handler in handlers:
for test in tests:
with handler:
self.log.warn(test)
record = subscriber.recv()
self.assertEqual(record.message, test)
self.assertEqual(record.channel, self.log.name)
@require_module('zmq')
def test_zeromq_background_thread(self):
handler, subscriber = self._get_zeromq()
test_handler = logbook.TestHandler()
controller = subscriber.dispatch_in_background(test_handler)
with handler:
self.log.warn('This is a warning')
self.log.error('This is an error')
# stop the controller. This will also stop the loop and join the
# background process. Before that we give it a fraction of a second
# to get all results
time.sleep(0.2)
controller.stop()
self.assertTrue(test_handler.has_warning('This is a warning'))
self.assertTrue(test_handler.has_error('This is an error'))
@missing('zmq')
def test_missing_zeromq(self):
from logbook.queues import ZeroMQHandler, ZeroMQSubscriber
with self.assertRaises(RuntimeError):
ZeroMQHandler('tcp://127.0.0.1:42000')
with self.assertRaises(RuntimeError):
ZeroMQSubscriber('tcp://127.0.0.1:42000')
@require_module('multiprocessing')
def test_multi_processing_handler(self):
from multiprocessing import Process, Queue
from logbook.queues import MultiProcessingHandler, \
MultiProcessingSubscriber
queue = Queue(-1)
test_handler = logbook.TestHandler()
subscriber = MultiProcessingSubscriber(queue)
def send_back():
handler = MultiProcessingHandler(queue)
handler.push_thread()
try:
logbook.warn('Hello World')
finally:
handler.pop_thread()
p = Process(target=send_back)
p.start()
p.join()
with test_handler:
subscriber.dispatch_once()
self.assert_(test_handler.has_warning('Hello World'))
def test_threaded_wrapper_handler(self):
from logbook.queues import ThreadedWrapperHandler
test_handler = logbook.TestHandler()
with ThreadedWrapperHandler(test_handler) as handler:
self.log.warn('Just testing')
self.log.error('More testing')
# give it some time to sync up
handler.close()
self.assertTrue(not handler.controller.running)
self.assertTrue(test_handler.has_warning('Just testing'))
self.assertTrue(test_handler.has_error('More testing'))
@require_module('execnet')
def test_execnet_handler(self):
def run_on_remote(channel):
import logbook
from logbook.queues import ExecnetChannelHandler
handler = ExecnetChannelHandler(channel)
log = logbook.Logger("Execnet")
handler.push_application()
log.info('Execnet works')
import execnet
gw = execnet.makegateway()
channel = gw.remote_exec(run_on_remote)
from logbook.queues import ExecnetChannelSubscriber
subscriber = ExecnetChannelSubscriber(channel)
record = subscriber.recv()
self.assertEqual(record.msg, 'Execnet works')
gw.exit()
@require_module('multiprocessing')
def test_subscriber_group(self):
from multiprocessing import Process, Queue
from logbook.queues import MultiProcessingHandler, \
MultiProcessingSubscriber, SubscriberGroup
a_queue = Queue(-1)
b_queue = Queue(-1)
test_handler = logbook.TestHandler()
subscriber = SubscriberGroup([
MultiProcessingSubscriber(a_queue),
MultiProcessingSubscriber(b_queue)
])
def make_send_back(message, queue):
def send_back():
with MultiProcessingHandler(queue):
logbook.warn(message)
return send_back
for _ in range(10):
p1 = Process(target=make_send_back('foo', a_queue))
p2 = Process(target=make_send_back('bar', b_queue))
p1.start()
p2.start()
p1.join()
p2.join()
messages = [subscriber.recv().message for i in (1, 2)]
self.assertEqual(sorted(messages), ['bar', 'foo'])
@require_module('redis')
def test_redis_handler(self):
import redis
from logbook.queues import RedisHandler
KEY = 'redis'
FIELDS = ['message', 'host']
r = redis.Redis(decode_responses=True)
redis_handler = RedisHandler(level=logbook.INFO, bubble=True)
#We don't want output for the tests, so we can wrap everything in a NullHandler
null_handler = logbook.NullHandler()
#Check default values
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS)
key, message = r.blpop(KEY)
            # Are all the fields in the record?
            for field in FIELDS:
                self.assertIn(field, message)
            self.assertEqual(key, KEY)
            self.assertIn(LETTERS, message)
#Change the key of the handler and check on redis
KEY = 'test_another_key'
redis_handler.key = KEY
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS)
key, message = r.blpop(KEY)
self.assertEqual(key, KEY)
#Check that extra fields are added if specified when creating the handler
FIELDS.append('type')
extra_fields = {'type': 'test'}
del(redis_handler)
redis_handler = RedisHandler(key=KEY, level=logbook.INFO,
extra_fields=extra_fields, bubble=True)
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS)
key, message = r.blpop(KEY)
                for field in FIELDS:
                    self.assertIn(field, message)
                self.assertIn('test', message)
#And finally, check that fields are correctly added if appended to the
#log message
FIELDS.append('more_info')
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS, more_info='This works')
key, message = r.blpop(KEY)
                for field in FIELDS:
                    self.assertIn(field, message)
                self.assertIn('This works', message)
class TicketingTestCase(LogbookTestCase):
@require_module('sqlalchemy')
def test_basic_ticketing(self):
from logbook.ticketing import TicketingHandler
with TicketingHandler('sqlite:///') as handler:
for x in xrange(5):
self.log.warn('A warning')
self.log.info('An error')
if x < 2:
try:
1 / 0
except Exception:
self.log.exception()
self.assertEqual(handler.db.count_tickets(), 3)
tickets = handler.db.get_tickets()
self.assertEqual(len(tickets), 3)
self.assertEqual(tickets[0].level, logbook.INFO)
self.assertEqual(tickets[1].level, logbook.WARNING)
self.assertEqual(tickets[2].level, logbook.ERROR)
self.assertEqual(tickets[0].occurrence_count, 5)
self.assertEqual(tickets[1].occurrence_count, 5)
self.assertEqual(tickets[2].occurrence_count, 2)
self.assertEqual(tickets[0].last_occurrence.level, logbook.INFO)
tickets[0].solve()
self.assert_(tickets[0].solved)
tickets[0].delete()
ticket = handler.db.get_ticket(tickets[1].ticket_id)
self.assertEqual(ticket, tickets[1])
occurrences = handler.db.get_occurrences(tickets[2].ticket_id,
order_by='time')
self.assertEqual(len(occurrences), 2)
record = occurrences[0]
self.assertIn(__file_without_pyc__, record.filename)
# avoid 2to3 destroying our assertion
self.assertEqual(getattr(record, 'func_name'), 'test_basic_ticketing')
self.assertEqual(record.level, logbook.ERROR)
self.assertEqual(record.thread, get_ident())
self.assertEqual(record.process, os.getpid())
self.assertEqual(record.channel, 'testlogger')
self.assertIn('1 / 0', record.formatted_exception)
class HelperTestCase(LogbookTestCase):
def test_jsonhelper(self):
from logbook.helpers import to_safe_json
class Bogus(object):
def __str__(self):
return 'bogus'
rv = to_safe_json([
None,
'foo',
u('jäger'),
1,
datetime(2000, 1, 1),
{'jäger1': 1, u('jäger2'): 2, Bogus(): 3, 'invalid': object()},
object() # invalid
])
self.assertEqual(
rv, [None, u('foo'), u('jäger'), 1, '2000-01-01T00:00:00Z',
{u('jäger1'): 1, u('jäger2'): 2, u('bogus'): 3,
u('invalid'): None}, None])
def test_datehelpers(self):
from logbook.helpers import format_iso8601, parse_iso8601
now = datetime.now()
rv = format_iso8601()
self.assertEqual(rv[:4], str(now.year))
self.assertRaises(ValueError, parse_iso8601, 'foo')
v = parse_iso8601('2000-01-01T00:00:00.12Z')
self.assertEqual(v.microsecond, 120000)
v = parse_iso8601('2000-01-01T12:00:00+01:00')
self.assertEqual(v.hour, 11)
v = parse_iso8601('2000-01-01T12:00:00-01:00')
self.assertEqual(v.hour, 13)
class UnicodeTestCase(LogbookTestCase):
# in Py3 we can just assume a more uniform unicode environment
@require_py3
def test_default_format_unicode(self):
with capturing_stderr_context() as stream:
self.log.warn('\u2603')
self.assertIn('WARNING: testlogger: \u2603', stream.getvalue())
@require_py3
def test_default_format_encoded(self):
with capturing_stderr_context() as stream:
# it's a string but it's in the right encoding so don't barf
self.log.warn('\u2603')
self.assertIn('WARNING: testlogger: \u2603', stream.getvalue())
@require_py3
def test_default_format_bad_encoding(self):
with capturing_stderr_context() as stream:
# it's a string, is wrong, but just dump it in the logger,
# don't try to decode/encode it
self.log.warn('Русский'.encode('koi8-r'))
self.assertIn("WARNING: testlogger: b'\\xf2\\xd5\\xd3\\xd3\\xcb\\xc9\\xca'", stream.getvalue())
@require_py3
def test_custom_unicode_format_unicode(self):
format_string = ('[{record.level_name}] '
'{record.channel}: {record.message}')
with capturing_stderr_context() as stream:
with logbook.StderrHandler(format_string=format_string):
self.log.warn("\u2603")
self.assertIn('[WARNING] testlogger: \u2603', stream.getvalue())
@require_py3
def test_custom_string_format_unicode(self):
format_string = ('[{record.level_name}] '
'{record.channel}: {record.message}')
with capturing_stderr_context() as stream:
with logbook.StderrHandler(format_string=format_string):
self.log.warn('\u2603')
self.assertIn('[WARNING] testlogger: \u2603', stream.getvalue())
@require_py3
def test_unicode_message_encoded_params(self):
with capturing_stderr_context() as stream:
self.log.warn("\u2603 {0}", "\u2603".encode('utf8'))
self.assertIn("WARNING: testlogger: \u2603 b'\\xe2\\x98\\x83'", stream.getvalue())
@require_py3
def test_encoded_message_unicode_params(self):
with capturing_stderr_context() as stream:
self.log.warn('\u2603 {0}'.encode('utf8'), '\u2603')
self.assertIn('WARNING: testlogger: \u2603 \u2603', stream.getvalue())
| bsd-3-clause | 4,229,163,377,748,149,000 | 39.432906 | 162 | 0.589857 | false |
scavallero/mydomus | auth.py | 1 | 3792 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MyDomus - Polling Service
# Copyright (c) 2016 Salvatore Cavallero ([email protected])
# https://github.com/scavallero/mydomus
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time
import json
import hashlib
import logging
import httpapp
import os
#########################################################################
# Module setup
########################################################################
logger = logging.getLogger("Mydomus")
user = {}
def verifyUser(usr,pswd):
res = False
if usr in user.keys():
if user[usr]['password'] == pswd:
res = True
return res
def verifyToken(token):
res = False
usr = ""
for item in user.keys():
if 'token' in user[item].keys():
if user[item]['token'] == token:
res = True
usr = item
return res,usr
def decodeUrlToken(url):
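    # Expects a path of the form /some/path/<token>; returns the path with the
    # trailing token stripped when the token is valid, otherwise None.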
fields = url.split('/')
token = fields[-1]
del fields[-1]
new_url = ''
for item in fields:
if item != '':
new_url = new_url + '/'+item
if new_url == '':
new_url = '/'
res,usr = verifyToken(token)
if res:
return new_url
else:
return None
def load():
global user
logger.info("Start loading user authorization")
CWD = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(CWD,"user.conf")) as data_file:
try:
user = json.load(data_file)
except ValueError: # includes simplejson.decoder.JSONDecodeError
logger.critical('json decoding failure user.conf')
for item in user.keys():
h = hashlib.sha224(item+user[item]['password']).hexdigest()
p = hashlib.md5(user[item]['password']).hexdigest()
user[item]['token'] = h
user[item]['password'] = p
logger.info('User: %s - %s' % (item,h))
### ADDED API ###
@httpapp.addurl('/verify/')
def url_verify(p,m):
global user
fields = p.split('/')
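        # Expected path shape: /verify/<user>/<password-hash>.  load() replaces each
        # stored password with its MD5 digest, so the value sent in the URL must be
        # the MD5 hash of the password rather than the clear text.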
if len(fields) == 4:
if fields[2] in user.keys():
if fields[3] == user[fields[2]]['password']:
return '{"status":"ok","token":"%s"}' % user[fields[2]]['token']
else:
return '{"status":"error","reason":"wrong password"}'
else:
return '{"status":"error","reason":"user unknown"}'
else:
return '{"status":"error","reason":"missing user or password"}'
@httpapp.addurl('/checktoken/')
def url_checktoken(p,m):
global user
fields = p.split('/')
if len(fields) == 3:
token = fields[2]
res,usr = verifyToken(token)
if res:
return '{"status":"ok","user":"%s"}' % usr
else:
return '{"status":"error","reason":"wrong token"}'
else:
return '{"status":"error","reason":"missing token"}'
logger.info("User authorization loaded")
| gpl-3.0 | -3,929,618,149,276,053,000 | 26.882353 | 86 | 0.531909 | false |
arunkgupta/gramps | gramps/gui/merge/__init__.py | 1 | 1096 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2006 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
"""
from mergeperson import *
from mergefamily import *
from mergeevent import *
from mergeplace import *
from mergesource import *
from mergecitation import *
from mergerepository import *
from mergemedia import *
from mergenote import *
| gpl-2.0 | 1,824,475,253,137,855,000 | 30.314286 | 75 | 0.751825 | false |
Stanford-Online/edx-analytics-data-api | analytics_data_api/v0/views/__init__.py | 1 | 10029 | from itertools import groupby
from django.db import models
from django.db.models import Q
from django.utils import timezone
from rest_framework import generics, serializers
from opaque_keys.edx.keys import CourseKey
from analytics_data_api.v0.exceptions import CourseNotSpecifiedError
from analytics_data_api.v0.views.utils import (
raise_404_if_none,
split_query_argument,
validate_course_id
)
class CourseViewMixin(object):
"""
Captures the course_id from the url and validates it.
"""
course_id = None
def get(self, request, *args, **kwargs):
self.course_id = self.kwargs.get('course_id', request.query_params.get('course_id', None))
if not self.course_id:
raise CourseNotSpecifiedError()
validate_course_id(self.course_id)
return super(CourseViewMixin, self).get(request, *args, **kwargs)
class PaginatedHeadersMixin(object):
"""
If the response is paginated, then augment it with this response header:
* Link: list of next and previous pagination URLs, e.g.
<next_url>; rel="next", <previous_url>; rel="prev"
Format follows the github API convention:
https://developer.github.com/guides/traversing-with-pagination/
Useful with PaginatedCsvRenderer, so that previous/next links aren't lost when returning CSV data.
"""
# TODO: When we upgrade to Django REST API v3.1, define a custom DEFAULT_PAGINATION_CLASS
# instead of using this mechanism:
# http://www.django-rest-framework.org/api-guide/pagination/#header-based-pagination
def get(self, request, *args, **kwargs):
"""
Stores pagination links in a response header.
"""
response = super(PaginatedHeadersMixin, self).get(request, args, kwargs)
link = self.get_paginated_links(response.data)
if link:
response['Link'] = link
return response
@staticmethod
def get_paginated_links(data):
"""
Returns the links string.
"""
# Un-paginated data is returned as a list, not a dict.
next_url = None
prev_url = None
if isinstance(data, dict):
next_url = data.get('next')
prev_url = data.get('previous')
if next_url is not None and prev_url is not None:
link = '<{next_url}>; rel="next", <{prev_url}>; rel="prev"'
elif next_url is not None:
link = '<{next_url}>; rel="next"'
elif prev_url is not None:
link = '<{prev_url}>; rel="prev"'
else:
link = ''
return link.format(next_url=next_url, prev_url=prev_url)
class CsvViewMixin(object):
"""
Augments a text/csv response with this header:
* Content-Disposition: allows the client to download the response as a file attachment.
"""
# Default filename slug for CSV download files
filename_slug = 'report'
def get_csv_filename(self):
"""
Returns the filename for the CSV download.
"""
course_key = CourseKey.from_string(self.course_id)
course_id = u'-'.join([course_key.org, course_key.course, course_key.run])
now = timezone.now().replace(microsecond=0)
return u'{0}--{1}--{2}.csv'.format(course_id, now.isoformat(), self.filename_slug)
def finalize_response(self, request, response, *args, **kwargs):
"""
Append Content-Disposition header to CSV requests.
"""
if request.META.get('HTTP_ACCEPT') == u'text/csv':
response['Content-Disposition'] = u'attachment; filename={}'.format(self.get_csv_filename())
return super(CsvViewMixin, self).finalize_response(request, response, *args, **kwargs)
class APIListView(generics.ListAPIView):
"""
An abstract view to store common code for views that return a list of data.
**Example Requests**
GET /api/v0/some_endpoint/
Returns full list of serialized models with all default fields.
GET /api/v0/some_endpoint/?ids={id_1},{id_2}
Returns list of serialized models with IDs that match an ID in the given
`ids` query parameter with all default fields.
GET /api/v0/some_endpoint/?ids={id_1},{id_2}&fields={some_field_1},{some_field_2}
Returns list of serialized models with IDs that match an ID in the given
`ids` query parameter with only the fields in the given `fields` query parameter.
GET /api/v0/some_endpoint/?ids={id_1},{id_2}&exclude={some_field_1},{some_field_2}
Returns list of serialized models with IDs that match an ID in the given
`ids` query parameter with all fields except those in the given `exclude` query
parameter.
POST /api/v0/some_endpoint/
{
"ids": [
"{id_1}",
"{id_2}",
...
"{id_200}"
],
"fields": [
"{some_field_1}",
"{some_field_2}"
]
}
**Response Values**
Since this is an abstract class, this view just returns an empty list.
**Parameters**
This view supports filtering the results by a given list of IDs. It also supports
        explicitly specifying the fields to include in each result with `fields` as well as
the fields to exclude with `exclude`.
For GET requests, these parameters are passed in the query string.
For POST requests, these parameters are passed as a JSON dict in the request body.
        ids -- The comma-separated list of identifiers to which results are filtered.
For example, 'edX/DemoX/Demo_Course,course-v1:edX+DemoX+Demo_2016'. Default is to
return all courses.
fields -- The comma-separated fields to return in the response.
For example, 'course_id,created'. Default is to return all fields.
exclude -- The comma-separated fields to exclude in the response.
For example, 'course_id,created'. Default is to not exclude any fields.
**Notes**
* GET is usable when the number of IDs is relatively low
* POST is required when the number of course IDs would cause the URL to be too long.
* POST functions the same as GET here. It does not modify any state.
"""
ids = None
fields = None
exclude = None
always_exclude = []
model_id_field = 'id'
ids_param = 'ids'
def get_serializer(self, *args, **kwargs):
kwargs.update({
'context': self.get_serializer_context(),
'fields': self.fields,
'exclude': self.exclude
})
return self.get_serializer_class()(*args, **kwargs)
def get(self, request, *args, **kwargs):
query_params = self.request.query_params
self.fields = split_query_argument(query_params.get('fields'))
exclude = split_query_argument(query_params.get('exclude'))
self.exclude = self.always_exclude + (exclude if exclude else [])
self.ids = split_query_argument(query_params.get(self.ids_param))
self.verify_ids()
return super(APIListView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
# self.request.data is a QueryDict. For keys with singleton lists as values,
# QueryDicts return the singleton element of the list instead of the list itself,
# which is undesirable. So, we convert to a normal dict.
request_data_dict = dict(request.data)
self.fields = request_data_dict.get('fields')
exclude = request_data_dict.get('exclude')
self.exclude = self.always_exclude + (exclude if exclude else [])
self.ids = request_data_dict.get(self.ids_param)
self.verify_ids()
return super(APIListView, self).get(request, *args, **kwargs)
def verify_ids(self):
"""
Optionally raise an exception if any of the IDs set as self.ids are invalid.
By default, no verification is done.
Subclasses can override this if they wish to perform verification.
"""
pass
def base_field_dict(self, item_id):
"""Default result with fields pre-populated to default values."""
field_dict = {
self.model_id_field: item_id,
}
return field_dict
def update_field_dict_from_model(self, model, base_field_dict=None, field_list=None):
field_list = (field_list if field_list else
[f.name for f in self.model._meta.get_fields()]) # pylint: disable=protected-access
field_dict = base_field_dict if base_field_dict else {}
field_dict.update({field: getattr(model, field) for field in field_list})
return field_dict
def postprocess_field_dict(self, field_dict):
"""Applies some business logic to final result without access to any data from the original model."""
return field_dict
def group_by_id(self, queryset):
"""Return results aggregated by a distinct ID."""
aggregate_field_dict = []
for item_id, model_group in groupby(queryset, lambda x: (getattr(x, self.model_id_field))):
field_dict = self.base_field_dict(item_id)
for model in model_group:
field_dict = self.update_field_dict_from_model(model, base_field_dict=field_dict)
field_dict = self.postprocess_field_dict(field_dict)
aggregate_field_dict.append(field_dict)
return aggregate_field_dict
def get_query(self):
return reduce(lambda q, item_id: q | Q(id=item_id), self.ids, Q())
@raise_404_if_none
def get_queryset(self):
if self.ids:
queryset = self.model.objects.filter(self.get_query())
else:
queryset = self.model.objects.all()
field_dict = self.group_by_id(queryset)
# Django-rest-framework will serialize this dictionary to a JSON response
return field_dict
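
# Illustrative sketch (added; not part of the original module). A concrete
# subclass normally pins the model, serializer and id handling and inherits
# the ids/fields/exclude behaviour from APIListView. `SomeSummary` and
# `SomeSummarySerializer` are hypothetical names used only for illustration:
#
#     class SomeSummaryListView(APIListView):
#         serializer_class = SomeSummarySerializer
#         model = SomeSummary
#         model_id_field = 'course_id'
#         ids_param = 'course_ids'
#
#     # GET /api/v0/some_endpoint/?course_ids={id_1},{id_2}&fields=course_id
#     # then returns only the `course_id` field for the two matching rows.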
| agpl-3.0 | 8,010,458,603,060,841,000 | 36.561798 | 109 | 0.626683 | false |
vvoZokk/dnn | dnn_project/generate_protos.py | 1 | 4557 | #!/usr/bin/env python
import os
import argparse
import re
from collections import defaultdict
import sys
KNOWN_TYPES = {
"double" : "double",
"int" : "int32",
"size_t" : "uint32",
"float" : "float",
"string" : "string",
"bool" : "bool",
"complex<double>" : "double",
"pair<string, size_t>" : "TStringToUintPair",
"pair<size_t, size_t>" : "TUintToUintPair",
}
VECTOR_RE = re.compile("(?:vector|ActVector)+<(.*)>")
def generateProtos(all_structures, package, dst, imports):
for fname, structures in all_structures.iteritems():
dst_file = fname.split(".")[0] + ".proto"
with open(os.path.join(dst, dst_file), 'w') as f_ptr:
f_ptr.write("package %s;\n" % package)
f_ptr.write("\n")
for imp in imports:
f_ptr.write("import \"{}\";\n".format(imp))
f_ptr.write("\n")
for s in structures:
f_ptr.write("message %s {\n" % s['name'])
i = 1
for f in s['fields']:
if KNOWN_TYPES.get(f[0]) is None:
m = VECTOR_RE.match(f[0])
if m is None:
raise Exception("Can't match {}".format(f[0]))
f_ptr.write(" repeated %s %s = %s;\n" % (KNOWN_TYPES[ m.group(1) ], f[1], str(i)))
if m.group(1).startswith("complex"):
f_ptr.write(" repeated %s %s = %s;\n" % (KNOWN_TYPES[ m.group(1) ], f[1] + "_imag", str(i+1)))
i += 1
else:
f_ptr.write(" required %s %s = %s;\n" % (KNOWN_TYPES[ f[0] ], f[1], str(i)))
i += 1
f_ptr.write("}\n")
f_ptr.write("\n")
def parseSources(src):
structures = defaultdict(list)
for root, dirs, files in os.walk(src):
for f in files:
af = os.path.join(root, f)
generate_proto = False
if af.endswith(".cpp") or af.endswith(".h"):
for l in open(af):
l = l.strip()
l = l.split("//")[0]
if "@GENERATE_PROTO@" in l:
generate_proto = True
struct = {}
curly_counter = 0
continue
if generate_proto:
curly_counter += l.count("{")
curly_counter -= l.count("}")
if len(struct) == 0:
m = re.match("[\W]*(?:class|struct)[\W]+([^ ]+)", l)
if not m:
raise Exception("Can't parse GENERATE_PROTO class or struct")
struct['name'] = m.group(1)
struct['fields'] = []
else:
m = re.match(
"(%s)[\W]+(?!__)([^ ]*);[\W]*$" % "|".join(
KNOWN_TYPES.keys() + [ "(?:vector|ActVector)+<{}>".format(t) for t in KNOWN_TYPES.keys() ]
),
l
)
if m and curly_counter == 1:
struct['fields'].append( (m.group(1), m.group(2)) )
continue
if len(struct) > 0 and curly_counter == 0:
generate_proto = False
structures[f].append(struct)
return structures
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--source-path", help="Path to the sources",
type=str, required=True)
parser.add_argument("-d", "--dest-path", help="Path where to store .proto",
type=str, required=True)
parser.add_argument("-p", "--package", help="Package name, default : %(default)s",
type=str, required=False, default="Protos")
parser.add_argument("-i", "--imports", help="Put imports to all messages (separated by ;)",
type=str, required=False, default=None)
args = parser.parse_args()
structures = parseSources(args.source_path)
imports = []
if args.imports:
imports = [ v.strip() for v in args.imports.split(";") if v.strip() ]
generateProtos(structures, args.package, args.dest_path, imports)
| mit | 8,567,659,111,309,439,000 | 43.242718 | 126 | 0.432521 | false |
yasserglez/pytiger2c | packages/pytiger2c/dot.py | 1 | 2786 | # -*- coding: utf-8 -*-
"""
Classes used to generate a Graphviz DOT file containing the abstract
syntax tree built from a Tiger program.
"""
class DotGenerator(object):
"""
    Class used to generate graphs in Graphviz DOT format.
"""
def __init__(self):
"""
        This class is used to generate Graphviz DOT code from the
        abstract syntax tree of a Tiger program.
"""
self._nodes = []
self._edges = []
self._num_nodes = 0
def add_node(self, label):
"""
        Adds a new node to the graph currently being built.

        @type label: C{str}
        @param label: Name of the node to add.

        @rtype: C{str}
        @return: Identifier of the newly added node. This identifier
            can be used to create new edges that have this node as one
            of their endpoints, using the C{add_edge} method of this
            same class.
"""
self._num_nodes += 1
name = 'node{number}'.format(number=self._num_nodes)
code = '{name} [label="{label}"];'.format(name=name, label=label)
self._nodes.append(code)
return name
def add_edge(self, from_node, to_node):
"""
        Adds an undirected edge to the graph currently being built.

        @type from_node: C{str}
        @param from_node: String identifying one endpoint node of the edge.

        @type to_node: C{str}
        @param to_node: String identifying one endpoint node of the edge.
"""
template = '{from_node} -- {to_node};'
code = template.format(from_node=from_node, to_node=to_node)
self._edges.append(code)
def write(self, output_fd):
"""
        Writes the Graphviz DOT code to a file descriptor.

        @type output_fd: C{file}
        @param output_fd: File descriptor where the Graphviz DOT code
            resulting from the translation of the Tiger program described
            by the abstract syntax tree should be written.
"""
indent = ' ' * 4
output_fd.write('graph AST {\n')
output_fd.write(indent)
output_fd.write('node [shape=record];\n\n')
for node_code in self._nodes:
output_fd.write(indent)
output_fd.write(node_code)
output_fd.write('\n')
output_fd.write('\n')
for edge_code in self._edges:
output_fd.write(indent)
output_fd.write(edge_code)
output_fd.write('\n')
output_fd.write('}\n')
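
# Minimal usage sketch (added for illustration; the node labels are arbitrary):
if __name__ == '__main__':
    import sys
    generator = DotGenerator()
    root = generator.add_node('LetNode')
    child = generator.add_node('IntegerLiteralNode')
    generator.add_edge(root, child)
    generator.write(sys.stdout)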
| mit | -712,239,282,687,048,000 | 33.575 | 74 | 0.574476 | false |
CLVsol/odoo_addons | clv_medicament/clv_annotation/clv_annotation.py | 1 | 2262 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp.osv import fields, osv
class clv_medicament(osv.osv):
_inherit = 'clv_medicament'
_columns = {
'annotation_ids': fields.many2many('clv_annotation',
'clv_medicament_annotation_rel',
'medicament_id',
'annotation_id',
'Annotations')
}
class clv_annotation(osv.osv):
_inherit = 'clv_annotation'
_columns = {
'medicament_ids': fields.many2many('clv_medicament',
'clv_medicament_annotation_rel',
'annotation_id',
'medicament_id',
'Medicaments')
}
| agpl-3.0 | 2,606,340,208,862,698,500 | 52.857143 | 80 | 0.396994 | false |
dstanek/keystone | keystone/tests/unit/test_backend_sql.py | 1 | 40921 | # -*- coding: utf-8 -*-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import uuid
import mock
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_db import options
from oslo_log import log
from oslo_log import versionutils
from six.moves import range
import sqlalchemy
from sqlalchemy import exc
from testtools import matchers
from keystone.common import driver_hints
from keystone.common import sql
from keystone import exception
from keystone.identity.backends import sql as identity_sql
from keystone.tests import unit as tests
from keystone.tests.unit import default_fixtures
from keystone.tests.unit.ksfixtures import database
from keystone.tests.unit import test_backend
from keystone.token.persistence.backends import sql as token_sql
CONF = cfg.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
class SqlTests(tests.SQLDriverOverrides, tests.TestCase):
def setUp(self):
super(SqlTests, self).setUp()
self.useFixture(database.Database())
self.load_backends()
# populate the engine with tables & fixtures
self.load_fixtures(default_fixtures)
# defaulted by the data load
self.user_foo['enabled'] = True
def config_files(self):
config_files = super(SqlTests, self).config_files()
config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
return config_files
class SqlModels(SqlTests):
def select_table(self, name):
table = sqlalchemy.Table(name,
sql.ModelBase.metadata,
autoload=True)
s = sqlalchemy.select([table])
return s
def assertExpectedSchema(self, table, expected_schema):
"""Assert that a table's schema is what we expect.
:param string table: the name of the table to inspect
:param tuple expected_schema: a tuple of tuples containing the
expected schema
:raises AssertionError: when the database schema doesn't match the
expected schema
The expected_schema format is simply::
(
('column name', sql type, qualifying detail),
...
)
The qualifying detail varies based on the type of the column::
- sql.Boolean columns must indicate the column's default value or
None if there is no default
- Columns with a length, like sql.String, must indicate the
column's length
- All other column types should use None
Example::
cols = (('id', sql.String, 64),
('enabled', sql.Boolean, True),
('extra', sql.JsonBlob, None))
self.assertExpectedSchema('table_name', cols)
"""
table = self.select_table(table)
actual_schema = []
for column in table.c:
if isinstance(column.type, sql.Boolean):
default = None
if column._proxies[0].default:
default = column._proxies[0].default.arg
actual_schema.append((column.name, type(column.type), default))
elif (hasattr(column.type, 'length') and
not isinstance(column.type, sql.Enum)):
# NOTE(dstanek): Even though sql.Enum columns have a length
# set we don't want to catch them here. Maybe in the future
# we'll check to see that they contain a list of the correct
# possible values.
actual_schema.append((column.name,
type(column.type),
column.type.length))
else:
actual_schema.append((column.name, type(column.type), None))
self.assertItemsEqual(expected_schema, actual_schema)
def test_user_model(self):
cols = (('id', sql.String, 64),
('name', sql.String, 255),
('password', sql.String, 128),
('domain_id', sql.String, 64),
('default_project_id', sql.String, 64),
('enabled', sql.Boolean, None),
('extra', sql.JsonBlob, None))
self.assertExpectedSchema('user', cols)
def test_group_model(self):
cols = (('id', sql.String, 64),
('name', sql.String, 64),
('description', sql.Text, None),
('domain_id', sql.String, 64),
('extra', sql.JsonBlob, None))
self.assertExpectedSchema('group', cols)
def test_domain_model(self):
cols = (('id', sql.String, 64),
('name', sql.String, 64),
('enabled', sql.Boolean, True),
('extra', sql.JsonBlob, None))
self.assertExpectedSchema('domain', cols)
def test_project_model(self):
cols = (('id', sql.String, 64),
('name', sql.String, 64),
('description', sql.Text, None),
('domain_id', sql.String, 64),
('enabled', sql.Boolean, None),
('extra', sql.JsonBlob, None),
('parent_id', sql.String, 64))
self.assertExpectedSchema('project', cols)
def test_role_assignment_model(self):
cols = (('type', sql.Enum, None),
('actor_id', sql.String, 64),
('target_id', sql.String, 64),
('role_id', sql.String, 64),
('inherited', sql.Boolean, False))
self.assertExpectedSchema('assignment', cols)
def test_user_group_membership(self):
cols = (('group_id', sql.String, 64),
('user_id', sql.String, 64))
self.assertExpectedSchema('user_group_membership', cols)
class SqlIdentity(SqlTests, test_backend.IdentityTests):
def test_password_hashed(self):
session = sql.get_session()
user_ref = self.identity_api._get_user(session, self.user_foo['id'])
self.assertNotEqual(user_ref['password'], self.user_foo['password'])
def test_delete_user_with_project_association(self):
user = {'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'password': uuid.uuid4().hex}
user = self.identity_api.create_user(user)
self.assignment_api.add_user_to_project(self.tenant_bar['id'],
user['id'])
self.identity_api.delete_user(user['id'])
self.assertRaises(exception.UserNotFound,
self.assignment_api.list_projects_for_user,
user['id'])
def test_create_null_user_name(self):
user = {'name': None,
'domain_id': DEFAULT_DOMAIN_ID,
'password': uuid.uuid4().hex}
self.assertRaises(exception.ValidationError,
self.identity_api.create_user,
user)
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user_by_name,
user['name'],
DEFAULT_DOMAIN_ID)
def test_create_user_case_sensitivity(self):
# user name case sensitivity is down to the fact that it is marked as
# an SQL UNIQUE column, which may not be valid for other backends, like
# LDAP.
# create a ref with a lowercase name
ref = {
'name': uuid.uuid4().hex.lower(),
'domain_id': DEFAULT_DOMAIN_ID}
ref = self.identity_api.create_user(ref)
# assign a new ID with the same name, but this time in uppercase
ref['name'] = ref['name'].upper()
self.identity_api.create_user(ref)
def test_create_project_case_sensitivity(self):
# project name case sensitivity is down to the fact that it is marked
# as an SQL UNIQUE column, which may not be valid for other backends,
# like LDAP.
# create a ref with a lowercase name
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex.lower(),
'domain_id': DEFAULT_DOMAIN_ID}
self.resource_api.create_project(ref['id'], ref)
# assign a new ID with the same name, but this time in uppercase
ref['id'] = uuid.uuid4().hex
ref['name'] = ref['name'].upper()
self.resource_api.create_project(ref['id'], ref)
def test_create_null_project_name(self):
tenant = {'id': uuid.uuid4().hex,
'name': None,
'domain_id': DEFAULT_DOMAIN_ID}
self.assertRaises(exception.ValidationError,
self.resource_api.create_project,
tenant['id'],
tenant)
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project,
tenant['id'])
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project_by_name,
tenant['name'],
DEFAULT_DOMAIN_ID)
def test_delete_project_with_user_association(self):
user = {'name': 'fakeuser',
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'passwd'}
user = self.identity_api.create_user(user)
self.assignment_api.add_user_to_project(self.tenant_bar['id'],
user['id'])
self.resource_api.delete_project(self.tenant_bar['id'])
tenants = self.assignment_api.list_projects_for_user(user['id'])
self.assertEqual([], tenants)
def test_metadata_removed_on_delete_user(self):
# A test to check that the internal representation
# or roles is correctly updated when a user is deleted
user = {'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'passwd'}
user = self.identity_api.create_user(user)
role = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.role_api.create_role(role['id'], role)
self.assignment_api.add_role_to_user_and_project(
user['id'],
self.tenant_bar['id'],
role['id'])
self.identity_api.delete_user(user['id'])
# Now check whether the internal representation of roles
# has been deleted
self.assertRaises(exception.MetadataNotFound,
self.assignment_api._get_metadata,
user['id'],
self.tenant_bar['id'])
def test_metadata_removed_on_delete_project(self):
# A test to check that the internal representation
# or roles is correctly updated when a project is deleted
user = {'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'passwd'}
user = self.identity_api.create_user(user)
role = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.role_api.create_role(role['id'], role)
self.assignment_api.add_role_to_user_and_project(
user['id'],
self.tenant_bar['id'],
role['id'])
self.resource_api.delete_project(self.tenant_bar['id'])
# Now check whether the internal representation of roles
# has been deleted
self.assertRaises(exception.MetadataNotFound,
self.assignment_api._get_metadata,
user['id'],
self.tenant_bar['id'])
def test_update_project_returns_extra(self):
"""This tests for backwards-compatibility with an essex/folsom bug.
Non-indexed attributes were returned in an 'extra' attribute, instead
of on the entity itself; for consistency and backwards compatibility,
those attributes should be included twice.
This behavior is specific to the SQL driver.
"""
tenant_id = uuid.uuid4().hex
arbitrary_key = uuid.uuid4().hex
arbitrary_value = uuid.uuid4().hex
tenant = {
'id': tenant_id,
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
arbitrary_key: arbitrary_value}
ref = self.resource_api.create_project(tenant_id, tenant)
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertIsNone(ref.get('extra'))
tenant['name'] = uuid.uuid4().hex
ref = self.resource_api.update_project(tenant_id, tenant)
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertEqual(arbitrary_value, ref['extra'][arbitrary_key])
def test_update_user_returns_extra(self):
"""This tests for backwards-compatibility with an essex/folsom bug.
Non-indexed attributes were returned in an 'extra' attribute, instead
of on the entity itself; for consistency and backwards compatibility,
those attributes should be included twice.
This behavior is specific to the SQL driver.
"""
arbitrary_key = uuid.uuid4().hex
arbitrary_value = uuid.uuid4().hex
user = {
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'password': uuid.uuid4().hex,
arbitrary_key: arbitrary_value}
ref = self.identity_api.create_user(user)
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertIsNone(ref.get('password'))
self.assertIsNone(ref.get('extra'))
user['name'] = uuid.uuid4().hex
user['password'] = uuid.uuid4().hex
ref = self.identity_api.update_user(ref['id'], user)
self.assertIsNone(ref.get('password'))
self.assertIsNone(ref['extra'].get('password'))
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertEqual(arbitrary_value, ref['extra'][arbitrary_key])
def test_sql_user_to_dict_null_default_project_id(self):
user = {
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'password': uuid.uuid4().hex}
user = self.identity_api.create_user(user)
session = sql.get_session()
query = session.query(identity_sql.User)
query = query.filter_by(id=user['id'])
raw_user_ref = query.one()
self.assertIsNone(raw_user_ref.default_project_id)
user_ref = raw_user_ref.to_dict()
self.assertNotIn('default_project_id', user_ref)
session.close()
def test_list_domains_for_user(self):
domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.resource_api.create_domain(domain['id'], domain)
user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
'domain_id': domain['id'], 'enabled': True}
test_domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.resource_api.create_domain(test_domain1['id'], test_domain1)
test_domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.resource_api.create_domain(test_domain2['id'], test_domain2)
user = self.identity_api.create_user(user)
user_domains = self.assignment_api.list_domains_for_user(user['id'])
self.assertEqual(0, len(user_domains))
self.assignment_api.create_grant(user_id=user['id'],
domain_id=test_domain1['id'],
role_id=self.role_member['id'])
self.assignment_api.create_grant(user_id=user['id'],
domain_id=test_domain2['id'],
role_id=self.role_member['id'])
user_domains = self.assignment_api.list_domains_for_user(user['id'])
self.assertThat(user_domains, matchers.HasLength(2))
def test_list_domains_for_user_with_grants(self):
# Create two groups each with a role on a different domain, and
# make user1 a member of both groups. Both these new domains
# should now be included, along with any direct user grants.
domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.resource_api.create_domain(domain['id'], domain)
user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
'domain_id': domain['id'], 'enabled': True}
user = self.identity_api.create_user(user)
group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
group1 = self.identity_api.create_group(group1)
group2 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
group2 = self.identity_api.create_group(group2)
test_domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.resource_api.create_domain(test_domain1['id'], test_domain1)
test_domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.resource_api.create_domain(test_domain2['id'], test_domain2)
test_domain3 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.resource_api.create_domain(test_domain3['id'], test_domain3)
self.identity_api.add_user_to_group(user['id'], group1['id'])
self.identity_api.add_user_to_group(user['id'], group2['id'])
# Create 3 grants, one user grant, the other two as group grants
self.assignment_api.create_grant(user_id=user['id'],
domain_id=test_domain1['id'],
role_id=self.role_member['id'])
self.assignment_api.create_grant(group_id=group1['id'],
domain_id=test_domain2['id'],
role_id=self.role_admin['id'])
self.assignment_api.create_grant(group_id=group2['id'],
domain_id=test_domain3['id'],
role_id=self.role_admin['id'])
user_domains = self.assignment_api.list_domains_for_user(user['id'])
self.assertThat(user_domains, matchers.HasLength(3))
def test_list_domains_for_user_with_inherited_grants(self):
"""Test that inherited roles on the domain are excluded.
Test Plan:
- Create two domains, one user, group and role
- Domain1 is given an inherited user role, Domain2 an inherited
group role (for a group of which the user is a member)
- When listing domains for user, neither domain should be returned
"""
domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
domain1 = self.resource_api.create_domain(domain1['id'], domain1)
domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
domain2 = self.resource_api.create_domain(domain2['id'], domain2)
user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
'domain_id': domain1['id'], 'enabled': True}
user = self.identity_api.create_user(user)
group = {'name': uuid.uuid4().hex, 'domain_id': domain1['id']}
group = self.identity_api.create_group(group)
self.identity_api.add_user_to_group(user['id'], group['id'])
role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.role_api.create_role(role['id'], role)
# Create a grant on each domain, one user grant, one group grant,
# both inherited.
self.assignment_api.create_grant(user_id=user['id'],
domain_id=domain1['id'],
role_id=role['id'],
inherited_to_projects=True)
self.assignment_api.create_grant(group_id=group['id'],
domain_id=domain2['id'],
role_id=role['id'],
inherited_to_projects=True)
user_domains = self.assignment_api.list_domains_for_user(user['id'])
# No domains should be returned since both domains have only inherited
# roles assignments.
self.assertThat(user_domains, matchers.HasLength(0))
class SqlTrust(SqlTests, test_backend.TrustTests):
pass
class SqlToken(SqlTests, test_backend.TokenTests):
def test_token_revocation_list_uses_right_columns(self):
# This query used to be heavy with too many columns. We want
# to make sure it is only running with the minimum columns
# necessary.
expected_query_args = (token_sql.TokenModel.id,
token_sql.TokenModel.expires)
with mock.patch.object(token_sql, 'sql') as mock_sql:
tok = token_sql.Token()
tok.list_revoked_tokens()
mock_query = mock_sql.get_session().query
mock_query.assert_called_with(*expected_query_args)
def test_flush_expired_tokens_batch(self):
# TODO(dstanek): This test should be rewritten to be less
# brittle. The code will likely need to be changed first. I
# just copied the spirit of the existing test when I rewrote
# mox -> mock. These tests are brittle because they have the
# call structure for SQLAlchemy encoded in them.
# test sqlite dialect
with mock.patch.object(token_sql, 'sql') as mock_sql:
mock_sql.get_session().bind.dialect.name = 'sqlite'
tok = token_sql.Token()
tok.flush_expired_tokens()
filter_mock = mock_sql.get_session().query().filter()
self.assertFalse(filter_mock.limit.called)
self.assertTrue(filter_mock.delete.called_once)
def test_flush_expired_tokens_batch_mysql(self):
# test mysql dialect, we don't need to test IBM DB SA separately, since
# other tests below test the differences between how they use the batch
# strategy
with mock.patch.object(token_sql, 'sql') as mock_sql:
mock_sql.get_session().query().filter().delete.return_value = 0
mock_sql.get_session().bind.dialect.name = 'mysql'
tok = token_sql.Token()
expiry_mock = mock.Mock()
ITERS = [1, 2, 3]
expiry_mock.return_value = iter(ITERS)
token_sql._expiry_range_batched = expiry_mock
tok.flush_expired_tokens()
# The expiry strategy is only invoked once, the other calls are via
# the yield return.
self.assertEqual(1, expiry_mock.call_count)
mock_delete = mock_sql.get_session().query().filter().delete
self.assertThat(mock_delete.call_args_list,
matchers.HasLength(len(ITERS)))
def test_expiry_range_batched(self):
upper_bound_mock = mock.Mock(side_effect=[1, "final value"])
sess_mock = mock.Mock()
query_mock = sess_mock.query().filter().order_by().offset().limit()
query_mock.one.side_effect = [['test'], sql.NotFound()]
for i, x in enumerate(token_sql._expiry_range_batched(sess_mock,
upper_bound_mock,
batch_size=50)):
if i == 0:
# The first time the batch iterator returns, it should return
# the first result that comes back from the database.
self.assertEqual(x, 'test')
elif i == 1:
# The second time, the database range function should return
# nothing, so the batch iterator returns the result of the
# upper_bound function
self.assertEqual(x, "final value")
else:
self.fail("range batch function returned more than twice")
def test_expiry_range_strategy_sqlite(self):
tok = token_sql.Token()
sqlite_strategy = tok._expiry_range_strategy('sqlite')
self.assertEqual(token_sql._expiry_range_all, sqlite_strategy)
def test_expiry_range_strategy_ibm_db_sa(self):
tok = token_sql.Token()
db2_strategy = tok._expiry_range_strategy('ibm_db_sa')
self.assertIsInstance(db2_strategy, functools.partial)
self.assertEqual(db2_strategy.func, token_sql._expiry_range_batched)
self.assertEqual(db2_strategy.keywords, {'batch_size': 100})
def test_expiry_range_strategy_mysql(self):
tok = token_sql.Token()
mysql_strategy = tok._expiry_range_strategy('mysql')
self.assertIsInstance(mysql_strategy, functools.partial)
self.assertEqual(mysql_strategy.func, token_sql._expiry_range_batched)
self.assertEqual(mysql_strategy.keywords, {'batch_size': 1000})
class SqlCatalog(SqlTests, test_backend.CatalogTests):
_legacy_endpoint_id_in_endpoint = True
_enabled_default_to_true_when_creating_endpoint = True
def test_catalog_ignored_malformed_urls(self):
service = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
self.catalog_api.create_service(service['id'], service.copy())
malformed_url = "http://192.168.1.104:8774/v2/$(tenant)s"
endpoint = {
'id': uuid.uuid4().hex,
'region_id': None,
'service_id': service['id'],
'interface': 'public',
'url': malformed_url,
}
self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy())
# NOTE(dstanek): there are no valid URLs, so nothing is in the catalog
catalog = self.catalog_api.get_catalog('fake-user', 'fake-tenant')
self.assertEqual({}, catalog)
def test_get_catalog_with_empty_public_url(self):
service = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
self.catalog_api.create_service(service['id'], service.copy())
endpoint = {
'id': uuid.uuid4().hex,
'region_id': None,
'interface': 'public',
'url': '',
'service_id': service['id'],
}
self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy())
catalog = self.catalog_api.get_catalog('user', 'tenant')
catalog_endpoint = catalog[endpoint['region_id']][service['type']]
self.assertEqual(service['name'], catalog_endpoint['name'])
self.assertEqual(endpoint['id'], catalog_endpoint['id'])
self.assertEqual('', catalog_endpoint['publicURL'])
self.assertIsNone(catalog_endpoint.get('adminURL'))
self.assertIsNone(catalog_endpoint.get('internalURL'))
def test_create_endpoint_region_404(self):
service = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
self.catalog_api.create_service(service['id'], service.copy())
endpoint = {
'id': uuid.uuid4().hex,
'region_id': uuid.uuid4().hex,
'service_id': service['id'],
'interface': 'public',
'url': uuid.uuid4().hex,
}
self.assertRaises(exception.ValidationError,
self.catalog_api.create_endpoint,
endpoint['id'],
endpoint.copy())
def test_create_region_invalid_id(self):
region = {
'id': '0' * 256,
'description': '',
'extra': {},
}
self.assertRaises(exception.StringLengthExceeded,
self.catalog_api.create_region,
region.copy())
def test_create_region_invalid_parent_id(self):
region = {
'id': uuid.uuid4().hex,
'parent_region_id': '0' * 256,
}
self.assertRaises(exception.RegionNotFound,
self.catalog_api.create_region,
region)
def test_delete_region_with_endpoint(self):
# create a region
region = {
'id': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
self.catalog_api.create_region(region)
# create a child region
child_region = {
'id': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'parent_id': region['id']
}
self.catalog_api.create_region(child_region)
# create a service
service = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
self.catalog_api.create_service(service['id'], service)
# create an endpoint attached to the service and child region
child_endpoint = {
'id': uuid.uuid4().hex,
'region_id': child_region['id'],
'interface': uuid.uuid4().hex[:8],
'url': uuid.uuid4().hex,
'service_id': service['id'],
}
self.catalog_api.create_endpoint(child_endpoint['id'], child_endpoint)
self.assertRaises(exception.RegionDeletionError,
self.catalog_api.delete_region,
child_region['id'])
# create an endpoint attached to the service and parent region
endpoint = {
'id': uuid.uuid4().hex,
'region_id': region['id'],
'interface': uuid.uuid4().hex[:8],
'url': uuid.uuid4().hex,
'service_id': service['id'],
}
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
self.assertRaises(exception.RegionDeletionError,
self.catalog_api.delete_region,
region['id'])
class SqlPolicy(SqlTests, test_backend.PolicyTests):
pass
class SqlInheritance(SqlTests, test_backend.InheritanceTests):
pass
class SqlTokenCacheInvalidation(SqlTests, test_backend.TokenCacheInvalidation):
def setUp(self):
super(SqlTokenCacheInvalidation, self).setUp()
self._create_test_data()
class SqlFilterTests(SqlTests, test_backend.FilterTests):
def _get_user_name_field_size(self):
return identity_sql.User.name.type.length
def clean_up_entities(self):
"""Clean up entity test data from Filter Test Cases."""
for entity in ['user', 'group', 'project']:
self._delete_test_data(entity, self.entity_list[entity])
self._delete_test_data(entity, self.domain1_entity_list[entity])
del self.entity_list
del self.domain1_entity_list
self.domain1['enabled'] = False
self.resource_api.update_domain(self.domain1['id'], self.domain1)
self.resource_api.delete_domain(self.domain1['id'])
del self.domain1
def test_list_entities_filtered_by_domain(self):
# NOTE(henry-nash): This method is here rather than in test_backend
# since any domain filtering with LDAP is handled by the manager
# layer (and is already tested elsewhere) not at the driver level.
self.addCleanup(self.clean_up_entities)
self.domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.resource_api.create_domain(self.domain1['id'], self.domain1)
self.entity_list = {}
self.domain1_entity_list = {}
for entity in ['user', 'group', 'project']:
# Create 5 entities, 3 of which are in domain1
DOMAIN1_ENTITIES = 3
self.entity_list[entity] = self._create_test_data(entity, 2)
self.domain1_entity_list[entity] = self._create_test_data(
entity, DOMAIN1_ENTITIES, self.domain1['id'])
# Should get back the DOMAIN1_ENTITIES in domain1
hints = driver_hints.Hints()
hints.add_filter('domain_id', self.domain1['id'])
entities = self._list_entities(entity)(hints=hints)
self.assertEqual(DOMAIN1_ENTITIES, len(entities))
self._match_with_list(entities, self.domain1_entity_list[entity])
# Check the driver has removed the filter from the list hints
self.assertFalse(hints.get_exact_filter_by_name('domain_id'))
def test_filter_sql_injection_attack(self):
"""Test against sql injection attack on filters
Test Plan:
- Attempt to get all entities back by passing a two-term attribute
- Attempt to piggyback filter to damage DB (e.g. drop table)
"""
# Check we have some users
users = self.identity_api.list_users()
self.assertTrue(len(users) > 0)
hints = driver_hints.Hints()
hints.add_filter('name', "anything' or 'x'='x")
users = self.identity_api.list_users(hints=hints)
self.assertEqual(0, len(users))
# See if we can add a SQL command...use the group table instead of the
# user table since 'user' is reserved word for SQLAlchemy.
group = {'name': uuid.uuid4().hex, 'domain_id': DEFAULT_DOMAIN_ID}
group = self.identity_api.create_group(group)
hints = driver_hints.Hints()
hints.add_filter('name', "x'; drop table group")
groups = self.identity_api.list_groups(hints=hints)
self.assertEqual(0, len(groups))
groups = self.identity_api.list_groups()
self.assertTrue(len(groups) > 0)
class SqlLimitTests(SqlTests, test_backend.LimitTests):
def setUp(self):
super(SqlLimitTests, self).setUp()
test_backend.LimitTests.setUp(self)
class FakeTable(sql.ModelBase):
__tablename__ = 'test_table'
col = sql.Column(sql.String(32), primary_key=True)
@sql.handle_conflicts('keystone')
def insert(self):
raise db_exception.DBDuplicateEntry
@sql.handle_conflicts('keystone')
def update(self):
raise db_exception.DBError(
inner_exception=exc.IntegrityError('a', 'a', 'a'))
@sql.handle_conflicts('keystone')
def lookup(self):
raise KeyError
class SqlDecorators(tests.TestCase):
def test_initialization_fail(self):
self.assertRaises(exception.StringLengthExceeded,
FakeTable, col='a' * 64)
def test_initialization(self):
tt = FakeTable(col='a')
self.assertEqual('a', tt.col)
def test_non_ascii_init(self):
# NOTE(I159): Non ASCII characters must cause UnicodeDecodeError
# if encoding is not provided explicitly.
self.assertRaises(UnicodeDecodeError, FakeTable, col='Я')
def test_conflict_happend(self):
self.assertRaises(exception.Conflict, FakeTable().insert)
self.assertRaises(exception.UnexpectedError, FakeTable().update)
def test_not_conflict_error(self):
self.assertRaises(KeyError, FakeTable().lookup)
class SqlModuleInitialization(tests.TestCase):
@mock.patch.object(sql.core, 'CONF')
@mock.patch.object(options, 'set_defaults')
def test_initialize_module(self, set_defaults, CONF):
sql.initialize()
set_defaults.assert_called_with(CONF,
connection='sqlite:///keystone.db')
class SqlCredential(SqlTests):
def _create_credential_with_user_id(self, user_id=uuid.uuid4().hex):
credential_id = uuid.uuid4().hex
new_credential = {
'id': credential_id,
'user_id': user_id,
'project_id': uuid.uuid4().hex,
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'extra': uuid.uuid4().hex
}
self.credential_api.create_credential(credential_id, new_credential)
return new_credential
def _validateCredentialList(self, retrieved_credentials,
expected_credentials):
self.assertEqual(len(retrieved_credentials), len(expected_credentials))
        retrieved_ids = [c['id'] for c in retrieved_credentials]
        for cred in expected_credentials:
            self.assertIn(cred['id'], retrieved_ids)
def setUp(self):
super(SqlCredential, self).setUp()
self.credentials = []
for _ in range(3):
self.credentials.append(
self._create_credential_with_user_id())
self.user_credentials = []
for _ in range(3):
cred = self._create_credential_with_user_id(self.user_foo['id'])
self.user_credentials.append(cred)
self.credentials.append(cred)
def test_list_credentials(self):
credentials = self.credential_api.list_credentials()
self._validateCredentialList(credentials, self.credentials)
# test filtering using hints
hints = driver_hints.Hints()
hints.add_filter('user_id', self.user_foo['id'])
credentials = self.credential_api.list_credentials(hints)
self._validateCredentialList(credentials, self.user_credentials)
def test_list_credentials_for_user(self):
credentials = self.credential_api.list_credentials_for_user(
self.user_foo['id'])
self._validateCredentialList(credentials, self.user_credentials)
class DeprecatedDecorators(SqlTests):
def setUp(self):
super(DeprecatedDecorators, self).setUp()
# The only reason this is here is because report_deprecated_feature()
# registers the fatal_deprecations option which these tests use.
versionutils.report_deprecated_feature(
log.getLogger(__name__), 'ignore this message')
def test_assignment_to_role_api(self):
"""Test that calling one of the methods does call LOG.deprecated.
This method is really generic to the type of backend, but we need
one to execute the test, so the SQL backend is as good as any.
"""
# Rather than try and check that a log message is issued, we
# enable fatal_deprecations so that we can check for the
# raising of the exception.
# First try to create a role without enabling fatal deprecations,
# which should work due to the cross manager deprecated calls.
role_ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.assignment_api.create_role(role_ref['id'], role_ref)
self.role_api.get_role(role_ref['id'])
# Now enable fatal exceptions - creating a role by calling the
# old manager should now fail.
self.config_fixture.config(fatal_deprecations=True)
role_ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.assertRaises(versionutils.DeprecatedConfig,
self.assignment_api.create_role,
role_ref['id'], role_ref)
def test_assignment_to_resource_api(self):
"""Test that calling one of the methods does call LOG.deprecated.
This method is really generic to the type of backend, but we need
one to execute the test, so the SQL backend is as good as any.
"""
# Rather than try and check that a log message is issued, we
# enable fatal_deprecations so that we can check for the
# raising of the exception.
# First try to create a project without enabling fatal deprecations,
# which should work due to the cross manager deprecated calls.
project_ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID}
self.resource_api.create_project(project_ref['id'], project_ref)
self.resource_api.get_project(project_ref['id'])
# Now enable fatal exceptions - creating a project by calling the
# old manager should now fail.
self.config_fixture.config(fatal_deprecations=True)
project_ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID}
self.assertRaises(versionutils.DeprecatedConfig,
self.assignment_api.create_project,
project_ref['id'], project_ref)
| apache-2.0 | -6,386,833,707,063,816,000 | 40.084337 | 79 | 0.584751 | false |
Arzaroth/virtualnetwork | network/mapParser.py | 1 | 4727 | #!/usr/bin/python3.3 -O
from pyrser import grammar,meta
from pyrser.directives import ignore
from network import Host, Router
import sys
def insensitiveCase(s):
return "[" + " ".join("['" + "'|'".join(x) + "']" for x in map((lambda each: [each.lower(), each.upper()]), s)) + "]"
class MapParser(grammar.Grammar):
entry = "Map"
grammar = """
Map = [#init_map(_) @ignore("null") [[[Hosts:h #add_host(_, h)] | [Routers:r #add_router(_, r)]] eol*]+
#link_hosts(_) eof]
Hosts = [#init_host(_) '[' ws {host} ws ']' eol+ [[Name | Ip | TTL | Route]:r #add_fhost(_, r)]+]
Routers = [#init_router(_) '[' ws {router} ws ']' eol+ [[Name | Ip | TTL | Route]:r #add_frouter(_, r)]+]
Name = [ws {name} ws '=' ws id:i #ret_f(_, "id", i) ws eol+]
Ip = [ws {ip} ws '=' ws cidraddr:c #ret_f(_, "ip", c) ws eol+]
TTL = [ws {ttl} ws '=' ws num:n #ret_f(_, "ttl", n) ws eol+]
Route = [ws {route} ws '=' ws [[{default}:c ws id:i #ret_f(_, "route", c, i)]
| [cidraddr:c ws id:i #ret_f(_, "route", c, i)]] ws eol+]
cidraddr = [num '.' num '.' num '.' num '/' num]
ws = [[' ' | '\r' | '\t']*]
""".format(host = insensitiveCase("Host"),
router = insensitiveCase("Router"),
route = insensitiveCase("Route"),
ip = insensitiveCase("IP"),
ttl = insensitiveCase("TTL"),
name = insensitiveCase("Name"),
default = insensitiveCase("Default"),
internet = insensitiveCase("Internet"))
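
# Example of the map syntax accepted by this grammar (illustrative only; the
# host/router names, addresses and TTL values below are made up):
#
#     [Host]
#     Name = pc1
#     IP = 192.168.0.2/24
#     TTL = 64
#     Route = default r1
#
#     [Router]
#     Name = r1
#     IP = 192.168.0.1/24
#     IP = 10.0.0.1/24
#     Route = 10.0.0.0/24 r2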
@meta.hook(MapParser)
def init_map(self, ast):
ast.network = {}
ast.routes = {}
return True
@meta.hook(MapParser)
def init_host(self, ast):
self.init_map(ast)
ast.network["route"] = []
return True
@meta.hook(MapParser)
def init_router(self, ast):
self.init_host(ast)
ast.network["ips"] = []
return True
@meta.hook(MapParser)
def link_hosts(self, ast):
for k,v in ast.routes.items():
for x in v:
if x[1] not in ast.network:
raise Exception("Unknown host ({0}) for {1} route.".format(x[1], k))
ast.network[k].addRoute(ast.network[x[1]], x[0])
return True
def base_add(ast, h):
    # Note: this helper is module-level, so it cannot use self.value();
    # report the parsed fields instead when the name is missing.
    if "name" not in h.network:
        raise Exception("Missing name field for given host:\n{0}".format(h.network))
    if h.network["name"] in ast.network:
        raise Exception("Redefinition of {0}.".format(h.network["name"]))
    ast.routes[h.network["name"]] = h.network["route"][::]
@meta.hook(MapParser)
def add_host(self, ast, h):
base_add(ast, h)
if "ip" not in h.network:
raise Exception("Missing ip field for given host:\n{0}".format(self.value(h)))
if "ttl" in h.network:
ast.network[h.network["name"]] = Host(h.network["name"],
h.network["ip"], h.network["ttl"])
else:
ast.network[h.network["name"]] = Host(h.network["name"],
h.network["ip"])
return True
@meta.hook(MapParser)
def add_router(self, ast, h):
base_add(ast, h)
if not h.network["ips"]:
raise Exception("Missing ip field for given host")
if "ttl" in h.network:
ast.network[h.network["name"]] = Router(h.network["name"],
*h.network["ips"], ttl = h.network["ttl"])
else:
ast.network[h.network["name"]] = Router(h.network["name"],
*h.network["ips"])
return True
@meta.hook(MapParser)
def ret_f(self, ast, *args):
ast.retvals = [args[0]]
ast.retvals.extend([self.value(x) for x in args[1:]])
return True
@meta.hook(MapParser)
def add_fhost(self, ast, r):
def reg_name(ast, name):
ast.network["name"] = name[0]
def reg_ip(ast, ip):
ast.network["ip"] = ip[0]
def reg_ttl(ast, ttl):
ast.network["ttl"] = ttl[0]
def reg_route(ast, route):
ast.network["route"].append(route)
fmap = {'id' : reg_name,
'ip' : reg_ip,
'ttl' : reg_ttl,
'route' : reg_route}
if r.retvals[0] in fmap:
fmap[r.retvals[0]](ast, r.retvals[1:])
return True
@meta.hook(MapParser)
def add_frouter(self, ast, r):
def reg_name(ast, name):
ast.network["name"] = name[0]
def reg_ip(ast, ip):
ast.network["ips"].append(ip[0])
def reg_ttl(ast, ttl):
ast.network["ttl"] = ttl[0]
def reg_route(ast, route):
ast.network["route"].append(route)
fmap = {'id' : reg_name,
'ip' : reg_ip,
'ttl' : reg_ttl,
'route' : reg_route}
if r.retvals[0] in fmap:
fmap[r.retvals[0]](ast, r.retvals[1:])
return True
| gpl-3.0 | 5,205,902,679,381,287,000 | 31.826389 | 121 | 0.533108 | false |
qma/pants | src/python/pants/backend/jvm/tasks/resources_task.py | 1 | 3335 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from abc import abstractmethod
from pants.backend.core.tasks.task import Task
from pants.option.custom_types import list_option
class ResourcesTask(Task):
"""A base class for tasks that process or create resource files.
    This base class assumes that resources targets, or targets that generate resources, are
    independent of each other and can be processed in isolation in any order.
"""
@classmethod
def product_types(cls):
return ['runtime_classpath']
@classmethod
def register_options(cls, register):
super(ResourcesTask, cls).register_options(register)
register('--confs', advanced=True, type=list_option, default=['default'],
help='Prepare resources for these Ivy confs.')
@classmethod
def prepare(cls, options, round_manager):
round_manager.require_data('compile_classpath')
@property
def cache_target_dirs(self):
return True
def execute(self):
# Tracked and returned for use in tests.
# TODO: Rewrite those tests. execute() is not supposed to return anything.
processed_targets = []
compile_classpath = self.context.products.get_data('compile_classpath')
runtime_classpath = self.context.products.get_data('runtime_classpath', compile_classpath.copy)
all_relevant_resources_targets = self.find_all_relevant_resources_targets()
if not all_relevant_resources_targets:
return processed_targets
with self.invalidated(targets=all_relevant_resources_targets,
fingerprint_strategy=self.create_invalidation_strategy(),
invalidate_dependents=False,
topological_order=False) as invalidation:
for vt in invalidation.all_vts:
# Register the target's chroot in the products.
for conf in self.get_options().confs:
runtime_classpath.add_for_target(vt.target, [(conf, vt.results_dir)])
# And if it was invalid, generate the resources to the chroot.
if not vt.valid:
self.prepare_resources(vt.target, vt.results_dir)
processed_targets.append(vt.target)
vt.update()
return processed_targets
@abstractmethod
def find_all_relevant_resources_targets(self):
"""Returns an iterable over all the relevant resources targets in the context."""
def create_invalidation_strategy(self):
"""Creates a custom fingerprint strategy for determining invalid resources targets.
:returns: A custom fingerprint strategy to use for determining invalid targets, or `None` to
use the standard target payload.
:rtype: :class:`pants.base.fingerprint_strategy.FingerprintStrategy`
"""
return None
@abstractmethod
def prepare_resources(self, target, chroot):
"""Prepares the resources associated with `target` in the given `chroot`.
:param target: The target to prepare resource files for.
:type target: :class:`pants.build_graph.target.Target`
:param string chroot: An existing, clean chroot dir to generate `target`'s resources to.
"""
| apache-2.0 | 8,281,013,611,547,203,000 | 37.333333 | 99 | 0.704648 | false |
opennode/waldur-mastermind | src/waldur_core/structure/managers.py | 1 | 4844 | from django.db import models
from waldur_core.core.managers import GenericKeyMixin
def get_permission_subquery(permissions, user):
subquery = models.Q()
for entity in ('customer', 'project'):
path = getattr(permissions, '%s_path' % entity, None)
if not path:
continue
if path == 'self':
prefix = 'permissions__'
else:
prefix = path + '__permissions__'
kwargs = {prefix + 'user': user, prefix + 'is_active': True}
subquery |= models.Q(**kwargs)
build_query = getattr(permissions, 'build_query', None)
if build_query:
subquery |= build_query(user)
return subquery
def filter_queryset_for_user(queryset, user):
if user is None or user.is_staff or user.is_support:
return queryset
try:
permissions = queryset.model.Permissions
except AttributeError:
return queryset
subquery = get_permission_subquery(permissions, user)
if not subquery:
return queryset
return queryset.filter(subquery).distinct()
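# Hedged usage sketch (illustration only; `Project` and `request.user` are placeholders):
# any model declaring a Permissions class with `customer_path`/`project_path` can be narrowed
# to what the requesting user may see:
#     visible = filter_queryset_for_user(Project.objects.all(), request.user)
# Staff and support users bypass the permission subquery and get the full queryset back.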
class StructureQueryset(models.QuerySet):
""" Provides additional filtering by customer or project (based on permission definition).
Example:
.. code-block:: python
Instance.objects.filter(project=12)
Droplet.objects.filter(
customer__name__startswith='A',
state=Droplet.States.ONLINE)
Droplet.objects.filter(Q(customer__name='Alice') | Q(customer__name='Bob'))
"""
def exclude(self, *args, **kwargs):
return super(StructureQueryset, self).exclude(
*[self._patch_query_argument(a) for a in args],
**self._filter_by_custom_fields(**kwargs)
)
def filter(self, *args, **kwargs):
return super(StructureQueryset, self).filter(
*[self._patch_query_argument(a) for a in args],
**self._filter_by_custom_fields(**kwargs)
)
def _patch_query_argument(self, arg):
# patch Q() objects if passed and add support of custom fields
if isinstance(arg, models.Q):
children = []
for opt in arg.children:
if isinstance(opt, models.Q):
children.append(self._patch_query_argument(opt))
else:
args = self._filter_by_custom_fields(**dict([opt]))
children.append(tuple(args.items())[0])
arg.children = children
return arg
def _filter_by_custom_fields(self, **kwargs):
# traverse over filter arguments in search of custom fields
args = {}
fields = [f.name for f in self.model._meta.get_fields()]
for field, val in kwargs.items():
base_field = field.split('__')[0]
if base_field in fields:
args.update(**{field: val})
elif base_field in ('customer', 'project'):
args.update(self._filter_by_permission_fields(base_field, field, val))
else:
args.update(**{field: val})
return args
def _filter_by_permission_fields(self, name, field, value):
# handle fields connected via permissions relations
extra = '__'.join(field.split('__')[1:]) if '__' in field else None
try:
# look for the target field path in Permissions class,
path = getattr(self.model.Permissions, '%s_path' % name)
except AttributeError:
# fallback to FieldError if it's missed
return {field: value}
else:
if path == 'self':
if extra:
return {extra: value}
else:
return {
'pk': value.pk if isinstance(value, models.Model) else value
}
else:
if extra:
path += '__' + extra
return {path: value}
StructureManager = models.Manager.from_queryset(StructureQueryset)
class ServiceSettingsManager(GenericKeyMixin, models.Manager):
""" Allows to filter and get service settings by generic key """
def get_available_models(self):
""" Return list of models that are acceptable """
from waldur_core.structure.models import BaseResource
return BaseResource.get_all_models()
class SharedServiceSettingsManager(ServiceSettingsManager):
def get_queryset(self):
return (
super(SharedServiceSettingsManager, self).get_queryset().filter(shared=True)
)
class PrivateServiceSettingsManager(ServiceSettingsManager):
def get_queryset(self):
return (
super(PrivateServiceSettingsManager, self)
.get_queryset()
.filter(shared=False)
)
| mit | 4,239,172,662,321,143,000 | 31.293333 | 94 | 0.579067 | false |
pytorch/fairseq | tests/test_online_backtranslation.py | 1 | 7650 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import unittest
from pathlib import Path
from typing import Any, Dict, Sequence
import fairseq.data.indexed_dataset as indexed_dataset
import fairseq.options
import fairseq.tasks.online_backtranslation as obt
import torch
from tests import utils
def mk_sample(tokens: Sequence[int], batch_size: int = 2) -> Dict[str, Any]:
batch = torch.stack([torch.tensor(tokens, dtype=torch.long)] * batch_size)
sample = {
"net_input": {
"src_tokens": batch,
"prev_output_tokens": batch,
"src_lengths": torch.tensor([len(tokens)] * batch_size, dtype=torch.long),
},
"target": batch[:, 1:],
}
return sample
def mk_dataset(num_samples: int, max_len: int, output: Path):
output.parent.mkdir(exist_ok=True)
idx = indexed_dataset.IndexedDatasetBuilder(str(output))
data = torch.randint(5, 100, (num_samples, max_len))
lengths = torch.randint(3, max_len, (num_samples,))
for d, l in zip(data, lengths):
d[0] = 0
idx.add_item(d[:l])
idx.finalize(output.with_suffix(".idx"))
assert output.exists()
assert output.with_suffix(".idx").exists()
class OnlineBacktranslationTest(unittest.TestCase):
tmp_dir = Path(tempfile.mkdtemp(suffix="OnlineBacktranslationTest"))
@classmethod
def obt_task(
cls, languages: Sequence[str], data: Path = None, language_mapping: str = None
):
dict_path = cls.tmp_dir / "dict.txt"
if not dict_path.exists():
dictionary = utils.dummy_dictionary(100)
dictionary.save(str(dict_path))
if data is not None:
(data / "dict.txt").write_text(dict_path.read_text())
else:
data = cls.tmp_dir
assert len(languages) >= 2
kwargs = {
"arch": "transformer",
# --max-sentences=1 for better predictability of batches
"max_sentences": 1,
            # Use characteristic dimensions
"encoder_layers": 3,
"encoder_embed_dim": 12,
"encoder_ffn_embed_dim": 14,
"encoder_attention_heads": 4,
"decoder_layers": 3,
"decoder_embed_dim": 12,
"decoder_output_dim": 12,
"decoder_ffn_embed_dim": 14,
"decoder_attention_heads": 4,
# Disable dropout so we have comparable tests.
"dropout": 0,
"attention_dropout": 0,
"activation_dropout": 0,
"encoder_layerdrop": 0,
}
args = fairseq.options.get_args(
data,
task="online_backtranslation",
mono_langs=",".join(languages),
valid_lang_pairs=f"{languages[0]}-{languages[1]}",
tokens_per_sample=256,
language_mapping=language_mapping,
**kwargs,
)
task = obt.OnlineBackTranslationTask.setup_task(args)
# we need to build the model to have the correct dictionary
model = task.build_model(task.args)
return task, model
def tmp_path(self, test_case: str) -> Path:
return Path(tempfile.mkdtemp(test_case, dir=self.tmp_dir))
def test_lang_tokens(self):
task, model = self.obt_task(["en", "ro", "zh"])
assert obt._lang_token("en") in task.dictionary
assert obt._lang_token("ro") in task.dictionary
assert obt._lang_token("zh") in task.dictionary
en_bos = obt._lang_token_index(task.common_dict, "en")
assert "en" == task.common_dict[en_bos].strip("_")
zh_bos = obt._lang_token_index(task.common_dict, "zh")
assert "zh" == task.common_dict[zh_bos].strip("_")
zh_sample = mk_sample([zh_bos, 16, 14, 12, 10])
# we expect to receive the bos token for translation
assert task.get_bos_token_from_sample(zh_sample) == en_bos
def test_backtranslate_sample(self):
task, model = self.obt_task(["en", "ro", "zh"])
en_bos = obt._lang_token_index(task.common_dict, "en")
zh_bos = obt._lang_token_index(task.common_dict, "zh")
sample = mk_sample([zh_bos, 16, 14, 12, 10])
task.backtranslate_sample(sample, "zh", "en")
target_zh = list(sample["target"][0])
assert target_zh == [16, 14, 12, 10] # original zh sentence
generated_en = sample["net_input"]["src_tokens"][0]
assert generated_en[0] == en_bos
def test_train_dataset(self):
data = self.tmp_path("test_train_dataset")
mk_dataset(20, 10, data / "en" / "train.bin")
mk_dataset(10, 10, data / "zh" / "train.bin")
task, model = self.obt_task(["en", "zh"], data)
task.load_dataset("train")
en_bos = obt._lang_token_index(task.common_dict, "en")
zh_bos = obt._lang_token_index(task.common_dict, "zh")
train = task.datasets["train"]
train.ordered_indices()
train.prefetch([0, 19])
sample_0 = train[0]
sample_19 = train[19]
self.assertEqual(
set(sample_0.keys()), {"en-BT", "en-DENOISE", "zh-BT", "zh-DENOISE"}
)
for sample in (sample_0, sample_19):
self.assertEqual(sample["en-BT"]["source"][0], en_bos)
# bt target isn't ready to look at.
self.assertEqual(sample["en-DENOISE"]["source"][0], en_bos)
# TODO What could we check on the target side ?
for i in range(10):
# Zh dataset is shorter, and is wrapped around En dataset.
train.prefetch([i, i + 10])
self.assertEqual(
list(train[i]["zh-DENOISE"]["source"]),
list(train[i + 10]["zh-DENOISE"]["source"]),
)
self.assertEqual(train[i]["zh-DENOISE"]["source"][0].item(), zh_bos)
# Sorted by increasing len
self.assertLess(
len(sample_0["en-BT"]["source"]), len(sample_19["en-BT"]["source"])
)
def test_valid_dataset(self):
data = self.tmp_path("test_valid_dataset")
mk_dataset(10, 21, data / "valid.en-zh.en.bin")
mk_dataset(10, 21, data / "valid.en-zh.zh.bin")
task, model = self.obt_task(["en", "zh"], data)
valid = task.load_dataset("valid")
en_bos = obt._lang_token_index(task.common_dict, "en")
assert valid is not None
valid.prefetch(range(10))
sample_0 = valid[0]
sample_9 = valid[9]
self.assertEqual(sample_0["id"], 0)
self.assertEqual(sample_9["id"], 9)
self.assertEqual(sample_0["source"][0], en_bos)
self.assertEqual(sample_9["source"][0], en_bos)
# TODO: could we test the target side ?
def assertFnMatch(self, fn, values):
for x, y in values.items():
fn_x = fn(x)
self.assertEqual(fn_x, y, f"Fn has wrong value: fn({x}) = {fn_x} != {y}")
def test_piecewise_linear_fn(self):
self.assertFnMatch(
obt.PiecewiseLinearFn.from_string("1.0"), {0: 1, 100: 1, 500: 1, 1000: 1}
)
self.assertFnMatch(
obt.PiecewiseLinearFn.from_string("0:1,1000:0"),
{0: 1, 500: 0.5, 1000: 0, 2000: 0},
)
self.assertFnMatch(
obt.PiecewiseLinearFn.from_string("0:0,1000:1"),
{0: 0, 500: 0.5, 1000: 1, 2000: 1},
)
self.assertFnMatch(
obt.PiecewiseLinearFn.from_string("0:0,1000:1,2000:0"),
{0: 0, 500: 0.5, 1000: 1, 1500: 0.5, 2000: 0, 3000: 0},
)
| mit | 6,307,616,552,569,338,000 | 36.135922 | 86 | 0.570458 | false |
mkocka/galaxytea | modeling/domcek/plots.py | 1 | 4294 | import matplotlib.pyplot as plt
from numpy import *
###List of variables
# r_in [10**10 cm] inner radius
# r_out [10**10 cm] outer radius
# step [10**10 cm] step of plot
# alfa [] parameter of accretion
# M_16 [10**16 g.s**(-1)] accretion flow
# m_1 [solar mass] mass of compact object
# R_hv [10**10 cm] radius of compact object
# R_10 [10**10 cm] distance from compact object
# f numerical factor
###List of computed parameters
# Surface density [g.cm**(-2)] (sigma)
# Height [cm] (H)
# Density [g.cm**(-3)] (rho)
# Central disc temperature [K] (T_c)
# Opacity [] (tau)
# viscosity [cm**2.s**(-1)] (nu)
# radial velocity towards center [cm.s**(-1)] (v_r)
###function solutions parameters
# parameter 1 r_in
# parameter 2 r_out
# parameter 3 step
# parameter 4 alfa
# parameter 5 M_16
# parameter 6 m_1
# parameter 7 R_hv
def solutions(r_in,r_out,step,alfa,M_16,m_1,R_hv):
#defining lists
list_function = arange(r_in,r_out,step)
R_10_l,surface_density_l,height_l,density_l,Fx = ([] for i in range(5))
temperature_l,opacity_l,viscosity_l,radial_velocity_l = ([] for i in range(4))
#computation and appending to lists
for R_10 in list_function:
f=(1-((R_hv)/(R_10))**(1.0/2))**(1.0/4)
surface_density = 5.2*alfa**(-4.0/5)*M_16**(7.0/10)*m_1**(1.0/4)*R_10**(-3.0/4)*f**(14.0/5)
height = 1.7*10**8*alfa**(-1.0/10)*M_16**(3.0/20)*m_1**(-3.0/8)*R_10**(9.0/8)*f**(3.0/5)
density = 3.1*10**(-8)*alfa**(-7.0/10)*M_16**(11.0/20)*m_1**(5.0/8)*R_10**(-15.0/8)*f**(11.0/5)
temperature = 1.4*10**4*alfa**(-1.0/5)*M_16**(3.0/10)*m_1**(1.0/4)*R_10**(-3.0/4)*f**(6.0/5)
opacity = 190*alfa**(-4.0/5)*M_16**(1.0/5)*f**(4.0/5)
viscosity = 1.8*10**14*alfa**(4.0/5)*M_16**(3.0/10)*m_1**(-1.0/4)*R_10**(3.0/4)*f**(6.0/5)
radial_velocity = 2.7*10**4*alfa**(4.0/5)*M_16**(3.0/10)*m_1**(-1.0/4)*R_10**(-1.0/4)*f**(-14.0/5)
R_10_l.append(R_10)
surface_density_l.append(surface_density)
height_l.append(height)
density_l.append(density)
temperature_l.append(temperature)
opacity_l.append(opacity)
viscosity_l.append(viscosity)
radial_velocity_l.append(radial_velocity)
Fx.append(f)
	#transformation R_10 to kilometers
R_km = [ x / 10**(-4) for x in R_10_l]
return R_km, surface_density_l, height_l, density_l,temperature_l,opacity_l,viscosity_l,radial_velocity_l,Fx
#for definitions of parameters look up
r_in =1.0001*10**(-4)
r_out =10**(-2)
step = 10**(-6)
alfa = 0.5
M_16 = 63
m_1 = 1.5
R_hv = 1.0*10**(-4)
lists=solutions(r_in,r_out,step,alfa,M_16,m_1,R_hv)
print 30*"-"
print "Used parameter values"
print 30*"-"
print "innder radius:", 10*".",r_in, 10*".", "[10$^{10}$ cm]"
print "outer radius:", 10*".", r_out, 10*".", "[10$^{10}$ cm]"
print "step of plot:", 10*".", step, 10*".", "[10$^{10}$ cm]"
print "parameter of accretion alfa:", 10*".", alfa
print "accretion flow:", 10*".", M_16, 10*".", "[10$^6$ g.s${-1)}$]"
print "mass of compact object:", 10*".", m_1, 10*".", "[solar mass]"
print "radius of compact object:", 10*".", R_hv, 10*".", "[10$^{10}$ cm]"
plt.plot(lists[0], lists[1])
plt.title('surface density')
plt.xlabel('radius [km]')
plt.ylabel('surface density [g.cm$^{-2}$] ')
plt.grid()
plt.savefig("surface density")
plt.gcf().clear()
plt.plot(lists[0], lists[2])
plt.title('height')
plt.xlabel('radius [km]')
plt.ylabel('height [cm] ')
plt.grid()
plt.savefig("height")
plt.gcf().clear()
plt.plot(lists[0], lists[3])
plt.title('density')
plt.xlabel('radius [km]')
plt.ylabel('density [g.cm$^{-3}$] ')
plt.grid()
plt.savefig("density")
plt.gcf().clear()
plt.plot(lists[0], lists[4])
plt.title('temperature')
plt.xlabel('radius [km]')
plt.ylabel('temperature [K] ')
plt.grid()
plt.savefig("temperature")
plt.gcf().clear()
plt.plot(lists[0], lists[5])
plt.title('opacity')
plt.xlabel('radius [km]')
plt.ylabel('opacity ')
plt.grid()
plt.savefig("opacity")
plt.gcf().clear()
plt.plot(lists[0], lists[6])
plt.title('viscosity')
plt.xlabel('radius [km]')
plt.ylabel('viscosity [cm$^{2}$.s$^{-1}$] ')
plt.grid()
plt.savefig("viscosity")
plt.gcf().clear()
plt.plot(lists[0], lists[7])
plt.title('radial velocity')
plt.xlabel('radius [km]')
plt.ylabel('radial velocity [cm.s$^{-1}$] ')
plt.grid()
plt.savefig("radial velocity")
plt.gcf().clear()
| mit | 4,655,926,774,987,794,000 | 29.671429 | 109 | 0.608058 | false |
MobSF/Mobile-Security-Framework-MobSF | mobsf/StaticAnalyzer/views/ios/view_source.py | 1 | 5000 | # -*- coding: utf_8 -*-
"""iOS View Source."""
import io
import json
import logging
import ntpath
import os
from pathlib import Path
import biplist
from django.conf import settings
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.utils.html import escape
from mobsf.MobSF.forms import FormUtil
from mobsf.MobSF.utils import (
is_file_exists,
is_safe_path,
print_n_send_error_response,
read_sqlite,
)
from mobsf.StaticAnalyzer.forms import (
ViewSourceIOSApiForm,
ViewSourceIOSForm,
)
logger = logging.getLogger(__name__)
def set_ext_api(file_path):
"""Smart Function to set Extenstion."""
ext = file_path.split('.')[-1]
if ext == 'plist':
return 'plist'
elif ext == 'xml':
return 'xml'
elif ext in ['sqlitedb', 'db', 'sqlite']:
return 'db'
elif ext == 'm':
return 'm'
else:
return 'txt'
def run(request, api=False):
"""View iOS Files."""
try:
logger.info('View iOS Source File')
exp = 'Error Description'
file_format = None
if api:
fil = request.POST['file']
md5_hash = request.POST['hash']
mode = request.POST['type']
viewsource_form = ViewSourceIOSApiForm(request.POST)
else:
fil = request.GET['file']
md5_hash = request.GET['md5']
mode = request.GET['type']
viewsource_form = ViewSourceIOSForm(request.GET)
typ = set_ext_api(fil)
if not viewsource_form.is_valid():
err = FormUtil.errors_message(viewsource_form)
if api:
return err
return print_n_send_error_response(request, err, False, exp)
base = Path(settings.UPLD_DIR) / md5_hash
if mode == 'ipa':
src1 = base / 'payload'
src2 = base / 'Payload'
if src1.exists():
src = src1
elif src2.exists():
src = src2
else:
raise Exception('MobSF cannot find Payload directory')
elif mode == 'ios':
src = base
sfile = src / fil
sfile = sfile.as_posix()
if not is_safe_path(src, sfile):
msg = 'Path Traversal Detected!'
if api:
return {'error': 'Path Traversal Detected!'}
return print_n_send_error_response(request, msg, False, exp)
dat = ''
sql_dump = {}
if typ == 'm':
file_format = 'cpp'
with io.open(sfile,
mode='r',
encoding='utf8',
errors='ignore') as flip:
dat = flip.read()
elif typ == 'xml':
file_format = 'xml'
with io.open(sfile,
mode='r',
encoding='utf8',
errors='ignore') as flip:
dat = flip.read()
elif typ == 'plist':
file_format = 'json'
dat = biplist.readPlist(sfile)
try:
dat = json.dumps(dat, indent=4, sort_keys=True)
except Exception:
pass
elif typ == 'db':
file_format = 'asciidoc'
sql_dump = read_sqlite(sfile)
elif typ == 'txt' and fil == 'classdump.txt':
file_format = 'cpp'
app_dir = os.path.join(settings.UPLD_DIR, md5_hash + '/')
cls_dump_file = os.path.join(app_dir, 'classdump.txt')
if is_file_exists(cls_dump_file):
with io.open(cls_dump_file, # lgtm [py/path-injection]
mode='r',
encoding='utf8',
errors='ignore') as flip:
dat = flip.read()
else:
dat = 'Class Dump result not Found'
elif typ == 'txt':
file_format = 'text'
with io.open(sfile,
mode='r',
encoding='utf8',
errors='ignore') as flip:
dat = flip.read()
else:
if api:
return {'error': 'Invalid Parameters'}
return HttpResponseRedirect('/error/')
context = {
'title': escape(ntpath.basename(fil)),
'file': escape(ntpath.basename(fil)),
'type': file_format,
'data': dat,
'sqlite': sql_dump,
'version': settings.MOBSF_VER,
}
template = 'general/view.html'
if api:
return context
return render(request, template, context)
except Exception as exp:
logger.exception('Error Viewing Source')
msg = str(exp)
exp = exp.__doc__
if api:
return print_n_send_error_response(request, msg, True, exp)
return print_n_send_error_response(request, msg, False, exp)
| gpl-3.0 | 7,384,499,573,915,808,000 | 31.258065 | 72 | 0.4996 | false |
alexandrevicenzi/pycompat | tests/test.py | 1 | 2225 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# These tests run only under Linux and Python 2.x +
# This is the Travis CI environment.
#
from pycompat import python as py
from pycompat import system
import sys
import unittest
class TestPyCompat(unittest.TestCase):
def test_python_is_64bits(self):
self.assertEqual(py.is_64bits, not py.is_32bits)
def test_is_cpython(self):
self.assertEqual(py.is_cpython, not py.is_pypy)
def test_immutability(self):
with self.assertRaises(AttributeError):
py.is2xx = 1
def test_python_is1xx(self):
self.assertFalse(py.is1xx)
def test_python_is2xx(self):
self.assertEqual(py.is2xx, sys.version_info[0] == 2)
def test_python_is3xx(self):
self.assertEqual(py.is3xx, sys.version_info[0] == 3)
def test_python_is_eqx(self):
self.assertTrue(py.is_eq(sys.version_info[0]))
def test_python_is_eqxx(self):
self.assertTrue(py.is_eq(sys.version_info[0], sys.version_info[1]))
def test_python_is_eqxxx(self):
self.assertTrue(py.is_eq(sys.version_info[0], sys.version_info[1], sys.version_info[2]))
def test_python_is_gtx(self):
self.assertTrue(py.is_gt(sys.version_info[0] - 1))
def test_python_is_gtxx(self):
self.assertTrue(py.is_gt(sys.version_info[0], sys.version_info[1] - 1))
def test_python_is_gtxxx(self):
self.assertTrue(py.is_gt(sys.version_info[0], sys.version_info[1], sys.version_info[2] - 1))
def test_python_is_ltx(self):
self.assertTrue(py.is_lt(sys.version_info[0] + 1))
def test_python_is_ltxx(self):
self.assertTrue(py.is_lt(sys.version_info[0], sys.version_info[1] + 1))
def test_python_is_ltxxx(self):
self.assertTrue(py.is_lt(sys.version_info[0], sys.version_info[1], sys.version_info[2] + 1))
def test_system_is_windows(self):
self.assertFalse(system.is_windows)
def test_system_is_cygwin(self):
self.assertFalse(system.is_cygwin)
def test_system_is_mac_os(self):
self.assertFalse(system.is_mac_os)
def test_system_is_linux(self):
self.assertTrue(system.is_linux)
if __name__ == '__main__':
unittest.main()
| mit | -7,611,998,995,524,017,000 | 27.525641 | 100 | 0.649888 | false |
mitsuhiko/zine | zine/utils/exceptions.py | 1 | 1632 | # -*- coding: utf-8 -*-
"""
zine.utils.exceptions
~~~~~~~~~~~~~~~~~~~~~
Exception utility module.
:copyright: (c) 2010 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from zine.i18n import _
class ZineException(Exception):
"""Baseclass for all Zine exceptions."""
message = None
def __init__(self, message=None):
Exception.__init__(self)
if message is not None:
self.message = message
def __str__(self):
return self.message or ''
def __unicode__(self):
return str(self).decode('utf-8', 'ignore')
class UserException(ZineException):
"""Baseclass for exception with unicode messages."""
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
if self.message is None:
return u''
return unicode(self.message)
def summarize_exception(exc_info):
def _to_unicode(x):
try:
return unicode(x)
except UnicodeError:
return str(x).encode('utf-8', 'replace')
exc_type, exc_value, tb = exc_info
if isinstance(exc_type, basestring):
prefix = _to_unicode(exc_type)
else:
prefix = _to_unicode(exc_type.__name__)
message = _to_unicode(exc_value)
filename = tb.tb_frame.f_globals.get('__file__')
if filename is None:
        filename = _(u'unknown file')
else:
filename = _to_unicode(filename)
if filename.endswith('.pyc'):
filename = filename[:-1]
return u'%s: %s' % (prefix, message), (filename, tb.tb_lineno)
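# Hedged usage sketch (names are placeholders): summarize the exception currently being
# handled, e.g. for logging, using the (type, value, traceback) triple from sys.exc_info():
#     import sys
#     try:
#         do_something_risky()
#     except Exception:
#         summary, (filename, lineno) = summarize_exception(sys.exc_info())
#         log.error('%s (%s:%d)' % (summary, filename, lineno))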
| bsd-3-clause | -1,362,887,440,807,812,400 | 24.5 | 72 | 0.579657 | false |
kasperschmidt/TDOSE | tdose_extract_spectra.py | 1 | 43824 | # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
import numpy as np
import sys
import astropy.io.fits as afits
import collections
import tdose_utilities as tu
import tdose_extract_spectra as tes
import tdose_build_mock_cube as tbmc
import pdb
import scipy.ndimage.filters as snf
import matplotlib as mpl
mpl.use('Agg') # prevent pyplot from opening window; enables closing ssh session with detached screen running TDOSE
import matplotlib.pyplot as plt
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def extract_spectra(model_cube_file,source_association_dictionary=None,nameext='tdose_spectrum',outputdir='./',clobber=False,
variance_cube_file=None,variance_cube_ext='ERROR',source_model_cube_file=None,source_cube_ext='DATA',
model_cube_ext='DATA',layer_scale_ext='WAVESCL',data_cube_file=None,verbose=True):
"""
Assemble the spectra determined by the wavelength layer scaling of the normalized models
when generating the source model cube
--- INPUT ---
model_cube_file Model cube to base extraction on (using header info and layer scales)
source_association_dictionary Source association dictionary defining what sources should be combined into
objects (individual spectra).
nameext The name extension to use for saved spectra
outputdir Directory to save spectra to
clobber Overwrite spectra if they already exists
    variance_cube_file            File containing variance cube of data to be used to estimate noise on 1D spectrum
variance_cube_ext Extension of variance cube to use
source_model_cube_file The source model cube defining the individual sources
    source_cube_ext               Extension of source model cube file that contains source models
model_cube_ext Extension of model cube file that contains model
layer_scale_ext Extension of model cube file that contains the layer scales
data_cube_file File containing original data cube used for extraction of aperture spectra
verbose
--- EXAMPLE OF USE ---
"""
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Loading data needed for spectral assembly')
model_cube = afits.open(model_cube_file)[model_cube_ext].data
model_cube_hdr = afits.open(model_cube_file)[model_cube_ext].header
layer_scale_arr = afits.open(model_cube_file)[layer_scale_ext].data
if variance_cube_file is not None:
        stddev_cube       = np.sqrt(afits.open(variance_cube_file)[variance_cube_ext].data)   # turn variance into standard deviation
source_model_cube = afits.open(source_model_cube_file)[source_cube_ext].data
else:
stddev_cube = None
source_model_cube = None
Nsources = layer_scale_arr.shape[0]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if data_cube_file is not None:
if verbose: print(' - Loading data cube ')
data_cube = afits.open(data_cube_file)[model_cube_ext].data
else:
data_cube = None
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if source_association_dictionary is None:
if verbose: print(' - Building default source association dictionary ' \
'(determining what sources are combined into objects), i.e., one source per object ')
sourcIDs_dic = collections.OrderedDict()
for oo in np.arange(int(Nsources)):
sourcIDs_dic[str(oo)] = [oo]
else:
sourcIDs_dic = source_association_dictionary
Nobj = len(list(sourcIDs_dic.keys()))
if verbose: print(' - Found '+str(Nobj)+' objects to generate spectra for in source_association_dictionary ')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Assembling wavelength vector for spectra ')
wavelengths = np.arange(model_cube_hdr['NAXIS3'])*model_cube_hdr['CD3_3']+model_cube_hdr['CRVAL3']
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
specfiles = []
for oo, key in enumerate(sourcIDs_dic.keys()):
obj_cube_hdr = model_cube_hdr.copy()
try:
specid = str("%.10d" % int(key))
except:
specid = str(key)
specname = outputdir+nameext+'_'+specid+'.fits'
specfiles.append(specname)
sourceIDs = sourcIDs_dic[key]
obj_cube_hdr.append(('OBJID ',specid ,'ID of object'),end=True)
obj_cube_hdr.append(('SRCIDS ',str(sourceIDs) ,'IDs of sources combined in object'),end=True)
if verbose:
infostr = ' - Extracting spectrum '+str("%6.f" % (oo+1))+' / '+str("%6.f" % Nobj)
sys.stdout.write("%s\r" % infostr)
sys.stdout.flush()
sourceoutput = tes.extract_spectrum(sourceIDs,layer_scale_arr,wavelengths,noise_cube=stddev_cube,
source_model_cube=source_model_cube, data_cube=data_cube,
specname=specname,obj_cube_hdr=obj_cube_hdr,clobber=clobber,verbose=True)
if verbose: print('\n - Done extracting spectra. Returning list of fits files generated')
return specfiles
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def extract_spectrum(sourceIDs,layer_scale_arr,wavelengths,noise_cube=None,source_model_cube=None,
specname='tdose_extract_spectra_extractedspec.fits',obj_cube_hdr=None,data_cube=None,
clobber=False,verbose=True):
"""
Extracting a spectrum based on the layer scale image from the model cube provided a list of sources to combine.
Noise is estimated from the noise cube (of the data)
If all layer_scales are 1 a data_cube for the extractions is expected
--- INPUT ---
sourceIDs The source IDs to combine into spectrum
layer_scale_arr Layer scale array (or image) produced when generating the model cube
fractional flux belonging to the source in each pixel
wavelengths Wavelength vector to use for extracted 1D spectrum.
noise_cube Cube with uncertainties (sqrt(variance)) of data cube to be used for estimating 1D uncertainties
To estimate S/N and 1D noise, providing a source model cube is required
    source_model_cube   Source model cube containing the model cube for each individual source separately
Needed in order to estimate noise from noise-cube
specname Name of file to save spectrum to
obj_cube_hdr Provide a template header to save the object cube (from combining the individual source cube)
as an extension to the extracted spectrum
data_cube In case all layers scales are 1, it is assumed that the source_model_cube contains a mask for the
spectral extraction, which will then be performed on this data_cube.
clobber To overwrite existing files set clobber=True
verbose Toggle verbosity
--- EXAMPLE OF USE ---
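    A minimal sketch (the arrays are placeholders assumed to be loaded from a TDOSE model
    cube and a variance cube beforehand):
        wave, spec1D, noise1D, objcube = tes.extract_spectrum([4,7],layer_scale_arr,wavelengths,
                                                              noise_cube=stddev_cube,
                                                              source_model_cube=source_model_cube,
                                                              specname='tdose_spectrum_0000000004.fits')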
"""
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Checking shape of wavelengths and layer_scale_arr')
if wavelengths.shape[0] != layer_scale_arr.shape[1]:
sys.exit(' ---> Shape of wavelength vector ('+str(wavelengths.shape)+
') and wavelength dimension of layer scale array ('+
                 str(layer_scale_arr.shape[1])+') do not match.')
else:
if verbose: print(' dimensions match; proceeding...')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Checking all sources have spectra in layer_scale_arr')
maxsource = np.max(sourceIDs)
if maxsource >= layer_scale_arr.shape[0]:
sys.exit(' ---> Sources in list '+str(str(sourceIDs))+
' not available among '+str(layer_scale_arr.shape[0])+' sources in layer_scale_arr.')
else:
if verbose: print(' All sources exist in layer_scale_arr; proceeding...')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Assembling object spectrum from source scaling')
source_ent = np.asarray(sourceIDs).astype(int)
if (layer_scale_arr == 1).all():
if verbose: print(' - All layer scales are 1; assuming source model cube contain mask for spectral extraction')
object_cube = np.sum(np.abs(source_model_cube[source_ent,:,:]),axis=0)
if data_cube is None:
            sys.exit(' ---> Did not find a data cube to extract spectra from as expected')
object_mask = (object_cube == 0) # masking all zeros in object mask
invalid_mask = np.ma.masked_invalid(data_cube).mask
comb_mask = (invalid_mask | object_mask)
spec_1D_masked = np.sum(np.sum( np.ma.array(data_cube,mask=comb_mask) ,axis=1),axis=1)
spec_1D = spec_1D_masked.filled(fill_value=0.0)
if noise_cube is not None:
if verbose: print(' Calculating noise as d_spec_k = sqrt( SUMij d_pix_ij**2 ), i.e., as the sqrt of variances summed')
invalid_mask_noise = np.ma.masked_invalid(noise_cube).mask
comb_mask = (comb_mask | invalid_mask_noise)
variance_1D_masked = np.ma.array(noise_cube,mask=comb_mask)**2
noise_1D_masked = np.sqrt( np.sum( np.sum( variance_1D_masked, axis=1), axis=1) )
noise_1D = noise_1D_masked.filled(fill_value=np.nan)
if verbose: print(' Generating S/N vector')
SN_1D = spec_1D / noise_1D
else:
if verbose: print(' - No "noise_cube" provided. Setting all errors and S/N values to NaN')
SN_1D = np.zeros(spec_1D.shape)*np.NaN
noise_1D = np.zeros(spec_1D.shape)*np.NaN
else:
if verbose: print(' - Some layer scales are different from 1; hence assembling spectra using layer scales')
if len(source_ent) < 1:
spec_1D = layer_scale_arr[source_ent,:]
else:
spec_1D = np.sum( layer_scale_arr[source_ent,:],axis=0)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if noise_cube is not None:
if verbose: print(' - Estimate S/N at each wavelength for 1D spectrum (see Eq. 16 of Kamann+2013)')
if verbose: print(' Estimating fraction of flux in each pixel wrt. total flux in each layer')
object_cube = np.sum((source_model_cube[source_ent,:,:,:]),axis=0) # summing source models for all source IDs
fluxfrac_cube_sents = np.zeros(source_model_cube.shape[1:])
for sent in source_ent:
object_cube_sent = np.sum((source_model_cube[[sent],:,:,:]),axis=0) # getting source model for model 'sent'
fluxscale1D_sent = layer_scale_arr[sent,:]
fluxfrac_cube_sent = object_cube_sent / fluxscale1D_sent[:,None,None]
fluxfrac_cube_sents = fluxfrac_cube_sents + fluxfrac_cube_sent
fluxfrac_cube = fluxfrac_cube_sents / len(source_ent) # renormalizing flux-fraction cube
if verbose: print(' Defining pixel mask (ignoring NaN pixels) ') #+\
# 'and pixels with <'+str(fluxfrac_min)+' of total pixel flux in model cube) '
# pix_mask = (fluxfrac_cube < fluxfrac_min)
invalid_mask1 = np.ma.masked_invalid(fluxfrac_cube).mask
invalid_mask2 = np.ma.masked_invalid(noise_cube).mask
# combining mask making sure all individual mask pixels have True for it to be true in combined mask
comb_mask = (invalid_mask1 | invalid_mask2) # | pix_mask
        if verbose: print('     Calculating noise propagated as d_spec_k = 1/sqrt( SUMij (fluxfrac_ij**2 / d_pix_ij**2) )')
squared_ratio = np.ma.array(fluxfrac_cube,mask=comb_mask)**2 / np.ma.array(noise_cube,mask=comb_mask)**2
inv_noise_masked = np.sqrt( np.sum( np.sum( squared_ratio, axis=1), axis=1) )
noise_1D = (1.0/inv_noise_masked).filled(fill_value=0.0)
if verbose: print(' Generating S/N vector')
SN_1D = spec_1D / noise_1D
else:
if verbose: print(' - No "noise_cube" provided. Setting all errors and S/N values to NaN')
SN_1D = np.zeros(spec_1D.shape)*np.NaN
noise_1D = np.zeros(spec_1D.shape)*np.NaN
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Saving extracted 1D spectrum and source cube to \n '+specname)
mainHDU = afits.PrimaryHDU() # primary HDU
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
c1 = afits.Column(name='wave', format='D', unit='ANGSTROMS', array=wavelengths)
c2 = afits.Column(name='flux', format='D', unit='', array=spec_1D)
c3 = afits.Column(name='fluxerror', format='D', unit='', array=noise_1D)
c4 = afits.Column(name='s2n', format='D', unit='', array=SN_1D)
coldefs = afits.ColDefs([c1,c2,c3,c4])
th = afits.BinTableHDU.from_columns(coldefs) # creating default header
# writing hdrkeys:'---KEY--', '----------------MAX LENGTH COMMENT-------------'
th.header.append(('EXTNAME ','SPEC1D' ,'cube containing source'),end=True)
head = th.header
tbHDU = afits.BinTableHDU.from_columns(coldefs, header=head)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if obj_cube_hdr is not None:
objHDU = afits.ImageHDU(object_cube)
for hdrkey in list(obj_cube_hdr.keys()):
if not hdrkey in list(objHDU.header.keys()):
objHDU.header.append((hdrkey,obj_cube_hdr[hdrkey],obj_cube_hdr.comments[hdrkey]),end=True)
try:
objHDU.header.append(('EXTNAMEC',objHDU.header['EXTNAME'] ,'EXTNAME of original source cube'),end=True)
del objHDU.header['EXTNAME']
except:
pass
objHDU.header.append(('EXTNAME ','SOURCECUBE' ,'cube containing source'),end=True)
hdulist = afits.HDUList([mainHDU,tbHDU,objHDU])
else:
hdulist = afits.HDUList([mainHDU,tbHDU])
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
hdulist.writeto(specname, overwrite=clobber)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
return wavelengths, spec_1D, noise_1D, object_cube
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def extract_spectra_viasourcemodelcube(datacube,sourcemodelcube,wavelengths,speclist,specids='None',outputdir='./',
noisecube=False,sourcemodel_hdr='None',verbose=True):
"""
    Wrapper for tes.extract_spectrum_viasourcemodelcube() to extract multiple spectra
--- INPUT ----
datacube Datacube to extract spectra from
sourcemodelcube Cube containing the source models for each object used as "extraction cube"
Dimensions should be [Nsources,datacube.shape]
wavelengths Wavelength vector to use for extracted 1D spectrum.
speclist List of spectra to extract. Indexes corresponding to the source models in the
                        sourcemodelcube
specids List of IDs to use in naming of output for source models referred to in "speclist"
outputdir Directory to store spectra to
noisecube Cube with uncertainties (sqrt(variance)) of data cube to be used in extraction
    sourcemodel_hdr     If not 'None' provide a basic fits header for the source model cubes extracted
and they will be appended to the individual output fits file containing the extracted
spectra.
verbose Toggle verbosity
--- EXAMPLE OF USE ---
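    A minimal sketch (cube arrays, object indices and IDs are placeholders prepared elsewhere):
        specnames = tes.extract_spectra_viasourcemodelcube(datacube,sourcemodelcube,wavelengths,
                                                           [0,1,5],specids=[101,102,150],
                                                           outputdir='./spectra/',noisecube=noisecube)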
"""
if verbose: print(' - Check that source models indicated are present in source model cube ')
specnames = []
Nmodels = sourcemodelcube.shape[0]
maxobj = np.max(speclist)
if maxobj >= Nmodels:
sys.exit(' ---> Object model "'+str(maxobj)+'" is not included in source model cube (models start at 0)')
else:
if verbose: print(' All object models appear to be included in the '+str(Nmodels)+' source models found in cube')
if datacube.shape != sourcemodelcube[0].shape:
sys.exit(' ---> Shape of datacube ('+str(datacube.shape)+') and shape of source models ('+
                 str(sourcemodelcube[0].shape)+') do not match.')
sourcemodel_sum = np.sum(sourcemodelcube,axis=0)
for ss, spec in enumerate(speclist):
if specids == 'None':
specid = spec
else:
specid = specids[ss]
specname = outputdir+'tdose_spectrum_'+str("%.12d" % specid)+'.fits'
specnames.append(specname)
sourcemodel = sourcemodelcube[spec,:,:,:]
sourceweights = sourcemodel/sourcemodel_sum # fractional flux of model for given source in each pixel
        if sourcemodel_hdr != 'None':
            sourcemodel_hdr.append(('OBJMODEL',spec       ,'Source model number in parent source model cube'),end=True)
            sourcemodel_hdr.append(('OBJID   ',specid     ,'ID of source'),end=True)
if verbose:
infostr = ' - Extracting spectrum '+str("%6.f" % (spec+1))+' / '+str("%6.f" % len(speclist))
sys.stdout.write("%s\r" % infostr)
sys.stdout.flush()
sourceoutput = tes.extract_spectrum_viasourcemodelcube(datacube,sourceweights,wavelengths,specname=specname,
noisecube=noisecube,spec1Dmethod='sum',
sourcecube_hdr=sourcemodel_hdr,verbose=verbose)
if verbose: print('\n - Done extracting spectra. Returning list of fits files containing spectra')
return specnames
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def extract_spectrum_viasourcemodelcube(datacube,sourceweights,wavelengths,
specname='tdose_extract_spectra_extractedspec.fits',
noisecube=None,spec1Dmethod='sum',sourcecube_hdr='None',verbose=True):
"""
Extracting a spectrum from a data cube given a source model (cube) to be used as 'extraction cube'
--- INPUT ---
datacube Datacube to extract spectra from
sourceweights Weights from source model to use as "extraction cube". The weights should contain the
fractional flux belonging to the source in each pixel
wavelengths Wavelength vector to use for extracted 1D spectrum.
specname Name of spectrum to generate
noisecube Cube with uncertainties (sqrt(variance)) of data cube to be used in extraction
spec1Dmethod Method used to extract 1D spectrum from source cube with
sourcecube_hdr If not 'None' provide a fits header for the source cube and it ill be appended to the
output fits file.
verbose Toggle verbosity
--- EXAMPLE OF USE ---
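    A minimal sketch (inputs are placeholders for arrays prepared elsewhere):
        output = tes.extract_spectrum_viasourcemodelcube(datacube,sourceweights,wavelengths,
                                                         specname='tdose_spectrum_000000101.fits',
                                                         noisecube=noisecube,spec1Dmethod='sum')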
"""
if verbose: print(' - Checking shape of data and source model cubes')
if datacube.shape != sourceweights.shape:
sys.exit(' ---> Shape of datacube ('+str(datacube.shape)+') and source weights ('+
                 str(sourceweights.shape)+') do not match.')
else:
if verbose: print(' dimensions match; proceeding with extraction ')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Applying weights to "datacube" to obtain source cube ')
sourcecube = datacube*sourceweights
if noisecube is not None:
if verbose: print(' - Using "noisecube" for error propagation ')
datanoise = noisecube
else:
if verbose: print(' - No "noisecube" provided. Setting all errors to 1')
datanoise = np.ones(datacube.shape)
    if verbose: print(' - Assuming uncertainty on source weights equals the datanoise when propagating errors')
sourceweights_err = datanoise
sourcecube_err = sourcecube * np.sqrt( (datanoise/datacube)**2 + (sourceweights_err/sourceweights)**2 )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Generating 1D spectrum from source cube via:')
spec_wave = wavelengths
maskinvalid = np.ma.masked_invalid(sourcecube * sourcecube_err).mask
if spec1Dmethod == 'sum':
if verbose: print(' Simple summation of fluxes in sourcecube.')
spec_flux = np.sum(np.sum(np.ma.array(sourcecube,mask=maskinvalid),axis=1),axis=1).filled()
if verbose: print(' Errors are propagated as sum of squares.')
spec_err = np.sqrt( np.sum( np.sum(np.ma.array(sourcecube_err,mask=maskinvalid)**2,axis=1),axis=1) ).filled()
elif spec1Dmethod == 'sum_SNweight':
pdb.set_trace()
else:
sys.exit(' ---> The chosen spec1Dmethod ('+str(spec1Dmethod)+') is invalid')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Saving extracted 1D spectrum and source cube to \n '+specname)
mainHDU = afits.PrimaryHDU() # primary HDU
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
c1 = afits.Column(name='wave', format='D', unit='ANGSTROMS', array=spec_wave)
c2 = afits.Column(name='flux', format='D', unit='', array=spec_flux)
c3 = afits.Column(name='fluxerror', format='D', unit='', array=spec_err)
coldefs = afits.ColDefs([c1,c2,c3])
th = afits.BinTableHDU.from_columns(coldefs) # creating default header
# writing hdrkeys:'---KEY--', '----------------MAX LENGTH COMMENT-------------'
th.header.append(('EXTNAME ','SPEC1D' ,'cube containing source'),end=True)
th.header.append(('SPECMETH' , spec1Dmethod ,'Method used for spectral extraction'),end=True)
head = th.header
tbHDU = afits.BinTableHDU.from_columns(coldefs, header=head)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if sourcecube_hdr != 'None':
sourceHDU = afits.ImageHDU(sourcecube) # default HDU with default minimal header
for hdrkey in list(sourcecube_hdr.keys()):
if not hdrkey in list(sourceHDU.header.keys()):
sourceHDU.header.append((hdrkey,sourcecube_hdr[hdrkey],sourcecube_hdr.comments[hdrkey]),end=True)
sourceHDU.header.append(('EXTNAME ','SOURCECUBE' ,'cube containing source'),end=True)
hdulist = afits.HDUList([mainHDU,tbHDU,sourceHDU])
else:
hdulist = afits.HDUList([mainHDU,tbHDU])
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
hdulist.writeto(specname, overwrite=True)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
return sourcecube, sourcecube_err, spec_wave, spec_flux, spec_err
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def plot_1Dspecs(filelist,plotname='./tdose_1Dspectra.pdf',colors=None,labels=None,plotSNcurve=False,
tdose_wavecol='wave',tdose_fluxcol='flux',tdose_errcol='fluxerror',
simsources=None,simsourcefile='/Users/kschmidt/work/TDOSE/mock_cube_sourcecat161213_all.fits',
sim_cube_dim=None,comparisonspecs=None,comp_colors=['blue'],comp_labels=None,
comp_wavecol='WAVE_AIR',comp_fluxcol='FLUX',comp_errcol='FLUXERR',
xrange=None,yrange=None,showspecs=False,shownoise=True,
skyspecs=None,sky_colors=['red'],sky_labels=['sky'],
sky_wavecol='lambda',sky_fluxcol='data',sky_errcol='stat',
showlinelists=None,linelistcolors=['gray'],smooth=0,ylog=False,
plotratio=False,
verbose=True,pubversion=False):
"""
Plots of multiple 1D spectra
--- INPUT ---
filelist List of spectra filenames to plot
plotname Name of plot to generate
colors Colors of the spectra in filelist to use
labels Labels of the spectra in filelist to use
plotSNcurve Show signal-to-noise curve instead of flux spectra
tdose_wavecol Wavelength column of the spectra in filelist
tdose_fluxcol Flux column of the spectra in filelist
tdose_errcol Flux error column of the spectra in filelist
simsources To plot simulated sources provide ids here
simsourcefile Source file with simulated sources to plot
sim_cube_dim Dimensions of simulated cubes
comparisonspecs To plot comparison spectra provide the filenames of those here
comp_colors Colors of the spectra in comparisonspecs list to use
comp_labels Labels of the spectra in comparisonspecs list to use
comp_wavecol Wavelength column of the spectra in comparisonspecs list
comp_fluxcol Flux column of the spectra in comparisonspecs list
comp_errcol Flux error column of the spectra in comparisonspecs list
xrange Xrange of plot
yrange Yrange of plot
showspecs To show plot instead of storing it to disk set showspecs=True
shownoise To add noise envelope around spectrum set shownoise=True
skyspecs To plot sky spectra provide the filenames of those here
sky_colors Colors of the spectra in skyspecs list to use
sky_labels Labels of the spectra in skyspecs list to use
sky_wavecol Wavelength column of the spectra in skyspecs list
sky_fluxcol Flux column of the spectra in skyspecs list
sky_errcol Flux error column of the spectra in skyspecs list
showlinelists To show line lists provide a list of arrays of dimension (Nlines,2) where each row in the
arrays contains [waveobs, name], where 'waveobs' is the observed wavelengths and 'name' is
a string with the name of each of the Nlines postions to mark on the spectrum.
linelistcolors List of colors for line lists provided in showlinelists
smooth To smooth the spectra, provide sigma of the 1D gaussian smoothing kernel to apply.
For smooth = 0, no smoothing is performed.
ylog To plot y-axis in log scale set to true
plotratio To plot the ratio between the main spectrum and the comparison spectra instead of the actual
spectra, set this keyword to true.
verbose Toggle verbosity
pubversion Generate more publication friendly version of figure
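    --- EXAMPLE OF USE ---
    A minimal sketch (file names and labels are hypothetical):
        tes.plot_1Dspecs(['tdose_spectrum_0000000004.fits'],plotname='./tdose_1Dspectra.pdf',
                         labels=['object 4'],colors=['black'],xrange=[4800,9300],smooth=2)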
"""
if len(filelist) == 1:
if verbose: print(' - Plotting data from '+filelist[0])
else:
if verbose: print(' - Plotting data from filelist ')
if pubversion:
fig = plt.figure(figsize=(6, 3))
fig.subplots_adjust(wspace=0.1, hspace=0.1,left=0.15, right=0.95, bottom=0.18, top=0.83)
Fsize = 12
else:
fig = plt.figure(figsize=(10, 3))
fig.subplots_adjust(wspace=0.1, hspace=0.1,left=0.06, right=0.81, bottom=0.15, top=0.95)
Fsize = 10
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Looking for flux units in spectra
bunit = 'BUNIT FLUX' # Default BUNIT
for unitspec in filelist:
if bunit == 'BUNIT FLUX':
try:
sourcecubehdr = afits.open(unitspec)['SOURCECUBE'].header
bunit = sourcecubehdr['BUNIT']
except:
try: # Backwards compatibility to TDOSE v2.0 extractions
sourcecubehdr = afits.open(unitspec)[2].header
bunit = sourcecubehdr['BUNIT']
except:
pass
if bunit == 'BUNIT FLUX':
if verbose: print(' - Did not find BUNIT in SOURCECUBE header for any spectra in filelist - are they not from TDOSE?')
if bunit == '10**(-20)*erg/s/cm**2/Angstrom': # Making bunit LaTeXy for MUSE-Wide BUNIT format
bunit = '1e-20 erg/s/cm$^2$/\AA'
else:
        bunit = '$'+bunit+'$' # minimizing problems with LaTeXing plot axes
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
lthick = 1
plt.rc('text', usetex=True)
plt.rc('font', family='serif',size=Fsize)
plt.rc('xtick', labelsize=Fsize)
plt.rc('ytick', labelsize=Fsize)
plt.clf()
plt.ioff()
#plt.title(plotname.split('TDOSE 1D spectra'),fontsize=Fsize)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
for ff, specfile in enumerate(filelist):
specdat = afits.open(specfile)[1].data
if colors is None:
spec_color = None
else:
spec_color = colors[ff]
if labels is None:
spec_label = specfile
else:
spec_label = labels[ff]
if xrange is not None:
goodent = np.where((specdat[tdose_wavecol] > xrange[0]) & (specdat[tdose_wavecol] < xrange[1]))[0]
            if len(goodent) == 0:
if verbose: print(' - The chosen xrange is not covered by the input spectrum. Plotting full spectrum')
goodent = np.arange(len(specdat[tdose_wavecol]))
else:
goodent = np.arange(len(specdat[tdose_wavecol]))
if plotSNcurve:
try:
s2ndat = specdat['s2n'][goodent]
except:
s2ndat = specdat[tdose_fluxcol][goodent]/specdat[tdose_errcol][goodent]
if smooth > 0:
s2ndat = snf.gaussian_filter(s2ndat, smooth)
if not plotratio:
plt.plot(specdat[tdose_wavecol][goodent],s2ndat,color=spec_color,lw=lthick, label=spec_label)
ylabel = 'S/N'
else:
plt.plot(specdat[tdose_wavecol][goodent],s2ndat/s2ndat,color=spec_color,lw=lthick, label=None)
ylabel = 'S/N ratio'
#plotname = plotname.replace('.pdf','_S2N.pdf')
else:
fillalpha = 0.30
fluxdat = specdat[tdose_fluxcol][goodent]
errlow = specdat[tdose_fluxcol][goodent]-specdat[tdose_errcol][goodent]
errhigh = specdat[tdose_fluxcol][goodent]+specdat[tdose_errcol][goodent]
if smooth > 0:
fluxdat = snf.gaussian_filter(fluxdat, smooth)
errlow = snf.gaussian_filter(errlow, smooth)
errhigh = snf.gaussian_filter(errhigh, smooth)
if not plotratio:
if shownoise:
plt.fill_between(specdat[tdose_wavecol][goodent],errlow,errhigh,
alpha=fillalpha,color=spec_color)
plt.plot(specdat[tdose_wavecol][goodent],fluxdat,
color=spec_color,lw=lthick, label=spec_label)
ylabel = tdose_fluxcol
else:
plt.plot(specdat[tdose_wavecol][goodent],fluxdat/fluxdat,
color=spec_color,lw=lthick, label=None)
ylabel = tdose_fluxcol+' ratio '
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if simsources is not None:
sim_total = np.zeros(len(specdat[tdose_wavecol]))
for sourcenumber in simsources:
sourcedat = afits.open(simsourcefile)[1].data
xpos = sourcedat['xpos'][sourcenumber]
ypos = sourcedat['ypos'][sourcenumber]
fluxscale = sourcedat['fluxscale'][sourcenumber]
sourcetype = sourcedat['sourcetype'][sourcenumber]
spectype = sourcedat['spectype'][sourcenumber]
sourcecube = tbmc.gen_source_cube([ypos,xpos],fluxscale,sourcetype,spectype,cube_dim=sim_cube_dim,
verbose=verbose,showsourceimgs=False)
simspec = np.sum( np.sum(sourcecube, axis=1), axis=1)
sim_total = sim_total + simspec
if smooth > 0:
simspec = snf.gaussian_filter(simspec, smooth)
plt.plot(specdat[tdose_wavecol],simspec,'--',color='black',lw=lthick)
plt.plot(specdat[tdose_wavecol],sim_total,'--',color='black',lw=lthick,
label='Sim. spectrum: \nsimsource='+str(simsources))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if comparisonspecs is not None:
for cc, comparisonspec in enumerate(comparisonspecs):
compdat = afits.open(comparisonspec)[1].data
if xrange is not None:
goodent = np.where((compdat[comp_wavecol] > xrange[0]) & (compdat[comp_wavecol] < xrange[1]))[0]
                if len(goodent) == 0:
if verbose: print(' - The chosen xrange is not covered by the comparison spectrum. Plotting full spectrum')
goodent = np.arange(len(compdat[comp_wavecol]))
else:
goodent = np.arange(len(compdat[comp_wavecol]))
if comp_colors is None:
comp_color = None
else:
comp_color = comp_colors[cc]
if comp_labels is None:
comp_label = comparisonspec
else:
comp_label = comp_labels[cc]
if plotSNcurve:
s2ncompdat = compdat[comp_fluxcol][goodent]/compdat[comp_errcol][goodent]
if smooth > 0:
s2ncompdat = snf.gaussian_filter(s2ncompdat, smooth)
if not plotratio:
plt.plot(compdat[comp_wavecol][goodent],s2ncompdat,
color=comp_color,lw=lthick, label=comp_label)
else:
plt.plot(compdat[comp_wavecol][goodent],s2ndat/s2ncompdat,
color=comp_color,lw=lthick, label=comp_label)
else:
fillalpha = 0.30
fluxcompdat = compdat[comp_fluxcol][goodent]
errlow = compdat[comp_fluxcol][goodent]-compdat[comp_errcol][goodent]
errhigh = compdat[comp_fluxcol][goodent]+compdat[comp_errcol][goodent]
if smooth > 0:
fluxcompdat = snf.gaussian_filter(fluxcompdat, smooth)
errlow = snf.gaussian_filter(errlow, smooth)
errhigh = snf.gaussian_filter(errhigh, smooth)
if not plotratio:
if shownoise:
plt.fill_between(compdat[comp_wavecol][goodent],errlow,errhigh,
alpha=fillalpha,color=comp_color)
plt.plot(compdat[comp_wavecol][goodent],fluxcompdat,
color=comp_color,lw=lthick, label=comp_label)
else:
plt.plot(compdat[comp_wavecol][goodent],fluxdat/fluxcompdat,
color=comp_color,lw=lthick, label=comp_label)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if skyspecs is not None:
for ss, skyspec in enumerate(skyspecs):
skydat = afits.open(skyspec)[1].data
if xrange is not None:
goodent = np.where((skydat[sky_wavecol] > xrange[0]) & (skydat[sky_wavecol] < xrange[1]))[0]
                if len(goodent) == 0:
if verbose: print(' - The chosen xrange is not covered by the sky spectrum. Plotting full spectrum')
goodent = np.arange(len(skydat[sky_wavecol]))
else:
goodent = np.arange(len(skydat[sky_wavecol]))
if sky_colors is None:
sky_color = None
else:
sky_color = sky_colors[ss]
if sky_labels is None:
sky_label = skyspec
else:
sky_label = sky_labels[ss]
if plotSNcurve:
s2nsky = skydat[sky_fluxcol][goodent]/skydat[sky_errcol][goodent]
if smooth > 0:
s2nsky = snf.gaussian_filter(s2nsky, smooth)
plt.plot(skydat[sky_wavecol][goodent],s2nsky,
color=sky_color,lw=lthick, label=sky_label)
else:
fillalpha = 0.30
fluxsky = skydat[sky_fluxcol][goodent]
errlow = skydat[sky_fluxcol][goodent]-skydat[sky_errcol][goodent]
errhigh = skydat[sky_fluxcol][goodent]+skydat[sky_errcol][goodent]
if smooth > 0:
fluxsky = snf.gaussian_filter(fluxsky, smooth)
errlow = snf.gaussian_filter(errlow, smooth)
errhigh = snf.gaussian_filter(errhigh, smooth)
if shownoise:
plt.fill_between(skydat[sky_wavecol][goodent],errlow,errhigh,
alpha=fillalpha,color=sky_color)
plt.plot(skydat[sky_wavecol][goodent],fluxsky,
color=sky_color,lw=lthick, label=sky_label)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if xrange is None:
xvals = [4800,9300]
else:
xvals = xrange
plt.plot(xvals,[0,0],'--k',lw=lthick)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
plt.xlabel('Wavelength [\AA]', fontsize=Fsize)
if pubversion:
if plotSNcurve:
ylabel = 'Signal-to-Noise'
else:
ylabel = 'Flux ['+str(bunit)+']'
if plotratio:
ylabel = ylabel+' ratio'
plt.ylabel(ylabel, fontsize=Fsize)
if ylog:
plt.yscale('log')
if yrange is not None:
plt.ylim(yrange)
if xrange is not None:
plt.xlim(xrange)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if showlinelists is not None:
for sl, showlinelist in enumerate(showlinelists):
ymin, ymax = plt.ylim()
xmin, xmax = plt.xlim()
for ww, wave in enumerate(showlinelist[:,0]):
wave = float(wave)
if (wave < xmax) & (wave > xmin):
plt.plot([wave,wave],[ymin,ymax],linestyle='--',color=linelistcolors[sl],lw=lthick)
plt.text(wave,ymin+1.03*np.abs([ymax-ymin]),showlinelist[:,1][ww],color=linelistcolors[sl], fontsize=Fsize-2., ha='center')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if pubversion:
leg = plt.legend(fancybox=True, loc='upper center',prop={'size':Fsize-2},ncol=4,numpoints=1,
bbox_to_anchor=(0.44, 1.27)) # add the legend
else:
leg = plt.legend(fancybox=True, loc='upper right',prop={'size':Fsize},ncol=1,numpoints=1,
bbox_to_anchor=(1.25, 1.03)) # add the legend
leg.get_frame().set_alpha(0.7)
if showspecs:
if verbose: print(' Showing plot (not saving to file)')
plt.show()
else:
if verbose: print(' Saving plot to',plotname)
plt.savefig(plotname)
plt.clf()
plt.close('all')
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def plot_histograms(datavectors,plotname='./tdose_cubehist.pdf',colors=None,labels=None,bins=None,
xrange=None,yrange=None,verbose=True,norm=True,ylog=True):
"""
Plot histograms of a set of data vectors.
--- INPUT ---
datavectors Set of data vectors to plot histograms of
plotname Name of plot to generate
colors Colors to use for histograms
labels Labels for the data vectors
bins Bins to use for histograms. Can be generated with np.arange(minval,maxval+binwidth,binwidth)
xrange Xrange of plot
yrange Yrange of plot
verbose Toggle verbosity
    norm            Normalize the histograms
    ylog            Use a logarithmic y-axis when plotting
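    --- EXAMPLE OF USE ---
    A minimal sketch (the data vectors are placeholders, e.g. flattened residual cubes):
        tes.plot_histograms([res1.ravel(),res2.ravel()],plotname='./tdose_cubehist.pdf',
                            colors=['blue','red'],labels=['cube 1','cube 2'],
                            bins=np.arange(-50,52,2),ylog=True)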
"""
Ndat = len(datavectors)
if verbose: print(' - Plotting histograms of N = '+str(Ndat)+' data vectors')
if colors is None:
colors = ['blue']*Ndat
if labels is None:
labels = ['data vector no. '+str(ii+1) for ii in np.arange(Ndat)]
if bins is None:
bins = np.arange(-100,102,2)
fig = plt.figure(figsize=(10, 3))
fig.subplots_adjust(wspace=0.1, hspace=0.1,left=0.08, right=0.81, bottom=0.1, top=0.95)
Fsize = 10
lthick = 1
plt.rc('text', usetex=True)
plt.rc('font', family='serif',size=Fsize)
plt.rc('xtick', labelsize=Fsize)
plt.rc('ytick', labelsize=Fsize)
plt.clf()
plt.ioff()
#plt.title(plotname.split('TDOSE 1D spectra'),fontsize=Fsize)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
for dd, datavec in enumerate(datavectors):
hist = plt.hist(datavec[~np.isnan(datavec)],color=colors[dd],bins=bins,histtype="step",lw=lthick,
label=labels[dd],normed=norm)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if yrange is None:
yvals = [1e-5,1e8]
else:
yvals = yrange
plt.plot([0,0],yvals,'--k',lw=lthick)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
plt.xlabel('', fontsize=Fsize)
plt.ylabel('\#', fontsize=Fsize)
if yrange is not None:
plt.ylim(yrange)
if xrange is not None:
plt.xlim(xrange)
if ylog:
plt.yscale('log')
leg = plt.legend(fancybox=True, loc='upper right',prop={'size':Fsize},ncol=1,numpoints=1,
bbox_to_anchor=(1.25, 1.03)) # add the legend
leg.get_frame().set_alpha(0.7)
if verbose: print(' Saving plot to',plotname)
plt.savefig(plotname)
plt.clf()
plt.close('all')
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = | mit | 2,141,411,611,090,954,000 | 50.925355 | 143 | 0.5421 | false |
bmcfee/gordon | scripts/mbrainz_import.py | 1 | 6643 | #! /usr/bin/env python
# Copyright (C) 2010 Douglas Eck
#
# This file is part of Gordon.
#
# Gordon is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gordon is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gordon. If not, see <http://www.gnu.org/licenses/>.
"""Set up a local copy of the MusicBrainz database"""
import os, pg, sys, socket
import gordon.db.config as config
from gordon import make_subdirs
def logged_on_dbhost() :
"""returns true if we are logged in on local dbhost"""
if config.DEF_DBHOST.lower() == 'localhost':
return True
return socket.getfqdn()==socket.getfqdn(config.DEF_DBHOST)
def do_perms() :
#this allows us to import using the more powerful brams account but then give the dbs over to gordon
    if not logged_on_dbhost() or os.environ['USER'] != config.DEF_DBSUPERUSER:
raise ValueError('This must be run as user %s from host %s' % (config.DEF_DBSUPERUSER,config.DEF_DBHOST))
dbmb = pg.connect('musicbrainz_db', user=config.DEF_DBSUPERUSER)
result = dbmb.query("select tablename from pg_tables where schemaname = 'public'").getresult()
for r in result :
table = r[0]
if table=='PendingData' :
continue
#q = 'alter table %s owner to "%s";' % (table, config.DEF_DBUSER)
q = 'grant select on %s to "%s";' % (table, config.DEF_DBUSER)
print q
try :
dbmb.query(q)
except :
print 'Failed'
def do_import() :
"""import tables. Does this need to have existing tables dropped first? I think so..."""
currdir=os.getcwd()
#first drop tables if they exist
cmd="dropdb 'musicbrainz_db'"
print 'Executing',cmd
os.system(cmd)
cmd="dropdb 'musicbrainz_db_raw'"
print 'Executing',cmd
os.system(cmd)
svnpath='%s/mbrainz/svn' % config.DEF_GORDON_DIR
reppath='%s/%s' % (svnpath,'mb_server')
os.chdir(reppath)
cmd='./admin/InitDb.pl --createdb --import ../../mbdump/latest/mb*bz2 --echo'
print 'Executing',cmd
os.system(cmd)
os.chdir(currdir)
def do_svn() :
"""Gets fresh svn repo to DEF_GORDON_DIR/mbrainz/svn/mb_server from the appropriate branch (NOT TRUNK!)"""
print 'TODO'
print 'If you do this you will need to edit cgi-bin/DBDefs.pl in the following way:'
print 'Replace musicbrainz_user with %s everywhere' % config.DEF_DBUSER
print 'Replace postgres with your superuser account if you have replaced postgres. E.g I use eckdoug'
print 'This should be automated here!'
print 'E.g replace username => "postgres", with username => "eckdoug",'
print 'Also set the password for user %s to be the password' % config.DEF_DBUSER
print ''
print 'Also if you are running with local=utf, you need to edit the appropriate part of InitDb.pl to skip'
print 'The test for locale.'
print 'E.g. replace in file admin/InitDb.pl: '
print ' unless ($locale eq "C")'
print 'with'
print ' unless (1)'
currdir=os.getcwd()
svnpath='%s/mbrainz/svn' % config.DEF_GORDON_DIR
reppath='%s/%s' % (svnpath,'mb_server')
if os.path.exists(reppath):
os.chdir(reppath)
print 'Doing svn update'
os.system('svn update')
else :
os.chdir(svnpath)
#os.system('svn co http://svn.musicbrainz.org/mb_server/trunk mb_server')
print 'We just rebuilt the musicbrainz svn. you will need to modify some config files. See:'
print 'Most should be in lib/DBDefs.pm.default lib/DBDefs.pm'
sys.exit(0)
os.chdir(currdir)
def do_ftp(site='ftp://ftp.musicbrainz.org/pub/musicbrainz/data/fullexport',force=False) :
"""Imports fresh database files to DEF_GORDON_DIR/mbrainz/mbdump/latest"""
import ftplib
ftp=ftplib.FTP('ftp.musicbrainz.org')
ftp.login('anonymous','')
ftp.cwd('pub/musicbrainz/data/fullexport')
for f in ftp.nlst() :
if f.startswith('latest-is') :
dr=f[10:]
print 'Latest is',dr
testdir=os.path.join(config.DEF_GORDON_DIR,'mbrainz','mbdump',dr)
if os.path.exists(testdir) :
print 'We already have this dump... skipping. Set force=True to force download'
if not force :
ftp.close()
return
#handle for our writing
def _ftp_handle(block) :
fout.write(block)
print ".",
#we should be in the right directory now
ftp.cwd(dr)
for f in ftp.nlst() :
f=f.strip()
# Open the file for writing in binary mode
fnout=os.path.join(config.DEF_GORDON_DIR,'mbrainz','mbdump',dr,f)
make_subdirs(fnout)
print 'Opening local file ' + fnout
fout= open(fnout, 'wb')
print 'downloading',f
ftp.retrbinary('RETR ' + f, _ftp_handle)
print 'Done downloading',f
fout.close()
ftp.close()
currdir=os.getcwd()
#create symlink from latest to our new dump
os.chdir('%s/mbrainz/mbdump' % config.DEF_GORDON_DIR)
try :
os.system('rm latest')
except :
pass
os.system('ln -s %s latest' % dr)
os.chdir(currdir)
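# For illustration (the dump name below is hypothetical), a successful
# do_ftp() run leaves a layout like:
#   <DEF_GORDON_DIR>/mbrainz/mbdump/20100515-001/mbdump*.tar.bz2
#   <DEF_GORDON_DIR>/mbrainz/mbdump/latest -> 20100515-001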
def mbrainz_import():
    if not logged_on_dbhost() or os.environ['USER'] != config.DEF_DBSUPERUSER:
raise ValueError('This must be run as user %s from host %s' % (config.DEF_DBSUPERUSER,config.DEF_DBHOST))
do_ftp()
do_svn()
#raw_input('commented out do_ftp and do_svn temporarily!')
do_import()
do_perms()
def die_with_usage() :
print 'This program rebuilds the musicbrainz databases.'
print 'It loads the dumps to %s/mbrainz/mbdump/latest' % config.DEF_GORDON_DIR
print 'It then updates the svn code in %s/mbrainz/svn' % config.DEF_GORDON_DIR
print 'It then does necessary data imports from the dumps'
print ('It then sets appropriate permissions to allow user %s to read the databases'
% config.DEF_DBUSER)
print ''
print 'To run code:'
print 'Be sure you are logged into %s' % config.DEF_DBHOST
print 'Be sure you are user %s' % config.DEF_DBSUPERUSER
print 'mbrainz_import.py go'
sys.exit(0)
if __name__=='__main__' :
if len(sys.argv)<2 :
die_with_usage()
mbrainz_import()
| gpl-3.0 | 8,949,096,639,798,875,000 | 33.780105 | 113 | 0.646094 | false |
tboyce021/home-assistant | tests/components/config/test_automation.py | 1 | 5030 | """Test Automation config panel."""
import json
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import config
from tests.async_mock import patch
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa
async def test_get_device_config(hass, hass_client):
"""Test getting device config."""
with patch.object(config, "SECTIONS", ["automation"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
def mock_read(path):
"""Mock reading data."""
return [{"id": "sun"}, {"id": "moon"}]
with patch("homeassistant.components.config._read", mock_read):
resp = await client.get("/api/config/automation/config/moon")
assert resp.status == 200
result = await resp.json()
assert result == {"id": "moon"}
async def test_update_device_config(hass, hass_client):
"""Test updating device config."""
with patch.object(config, "SECTIONS", ["automation"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
orig_data = [{"id": "sun"}, {"id": "moon"}]
def mock_read(path):
"""Mock reading data."""
return orig_data
written = []
def mock_write(path, data):
"""Mock writing data."""
written.append(data)
with patch("homeassistant.components.config._read", mock_read), patch(
"homeassistant.components.config._write", mock_write
), patch("homeassistant.config.async_hass_config_yaml", return_value={}):
resp = await client.post(
"/api/config/automation/config/moon",
data=json.dumps({"trigger": [], "action": [], "condition": []}),
)
assert resp.status == 200
result = await resp.json()
assert result == {"result": "ok"}
assert list(orig_data[1]) == ["id", "trigger", "condition", "action"]
assert orig_data[1] == {"id": "moon", "trigger": [], "condition": [], "action": []}
assert written[0] == orig_data
async def test_bad_formatted_automations(hass, hass_client):
"""Test that we handle automations without ID."""
with patch.object(config, "SECTIONS", ["automation"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
orig_data = [
{
# No ID
"action": {"event": "hello"}
},
{"id": "moon"},
]
def mock_read(path):
"""Mock reading data."""
return orig_data
written = []
def mock_write(path, data):
"""Mock writing data."""
written.append(data)
with patch("homeassistant.components.config._read", mock_read), patch(
"homeassistant.components.config._write", mock_write
), patch("homeassistant.config.async_hass_config_yaml", return_value={}):
resp = await client.post(
"/api/config/automation/config/moon",
data=json.dumps({"trigger": [], "action": [], "condition": []}),
)
await hass.async_block_till_done()
assert resp.status == 200
result = await resp.json()
assert result == {"result": "ok"}
# Verify ID added to orig_data
assert "id" in orig_data[0]
assert orig_data[1] == {"id": "moon", "trigger": [], "condition": [], "action": []}
async def test_delete_automation(hass, hass_client):
"""Test deleting an automation."""
ent_reg = await hass.helpers.entity_registry.async_get_registry()
assert await async_setup_component(
hass,
"automation",
{
"automation": [
{
"id": "sun",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation"},
},
{
"id": "moon",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation"},
},
]
},
)
assert len(ent_reg.entities) == 2
with patch.object(config, "SECTIONS", ["automation"]):
assert await async_setup_component(hass, "config", {})
client = await hass_client()
orig_data = [{"id": "sun"}, {"id": "moon"}]
def mock_read(path):
"""Mock reading data."""
return orig_data
written = []
def mock_write(path, data):
"""Mock writing data."""
written.append(data)
with patch("homeassistant.components.config._read", mock_read), patch(
"homeassistant.components.config._write", mock_write
), patch("homeassistant.config.async_hass_config_yaml", return_value={}):
resp = await client.delete("/api/config/automation/config/sun")
await hass.async_block_till_done()
assert resp.status == 200
result = await resp.json()
assert result == {"result": "ok"}
assert len(written) == 1
assert written[0][0]["id"] == "moon"
assert len(ent_reg.entities) == 1
| apache-2.0 | 774,325,679,850,653,300 | 29.301205 | 87 | 0.57495 | false |
mbeyeler/pulse2percept | examples/implants/plot_custom_electrode_array.py | 1 | 6434 | """
============================================================================
Creating your own electrode array
============================================================================
This example shows how to create a new
:py:class:`~pulse2percept.implants.ElectrodeArray` object.
As the base class for all electrode arrays in pulse2percept, the
:py:class:`~pulse2percept.implants.ElectrodeArray` class provides a blue print
for the functionality that every electrode array should have.
First and foremost, an :py:class:`~pulse2percept.implants.ElectrodeArray`
contains a collection of :py:class:`~pulse2percept.implants.Electrode` objects,
and new electrodes can be added via the
:py:func:`~pulse2percept.implants.ElectrodeArray.add_electrodes` method.
In addition, individual electrodes in the array can be accessed by indexing
using either their pre-assigned names (a string) or their place in the array
(integer).
Arranging electrodes in a circle
--------------------------------
In this example, we want to build a new type of electrode array that arranges
all of its electrodes in a circle.
To do this, we need to create a new class ``CircleElectrodeArray`` that is
a child of :py:class:`~pulse2percept.implants.ElectrodeArray`:
"""
##############################################################################
# .. code-block:: python
#
# class CircleElectrodeArray(ElectrodeArray):
# """Electrodes arranged in a circle"""
# ...
#
# This way, the ``CircleElectrodeArray`` class can access all public methods
# of :py:class:`~pulse2percept.implants.ElectrodeArray`.
#
# The constructor then has the job of creating all electrodes in the array
# and placing them at the appropriate location; for example, by using the
# :py:func:`~pulse2percept.implants.ElectrodeArray.add_electrodes` method.
#
# The constructor of the class should accept a number of arguments:
#
# - ``n_electrodes``: how many electrodes to arrange in a circle
# - ``radius``: the radius of the circle
# - ``x_center``: the x-coordinate of the center of the circle
# - ``y_center``: the y-coordinate of the center of the circle
#
# For simplicity, we will use :py:class:`~pulse2percept.implants.DiskElectrode`
# objects of a given radius (100um), although it would be relatively straightforward
# to allow the user to choose the electrode type.
from pulse2percept.implants import ElectrodeArray, DiskElectrode
import collections as coll
import numpy as np
class CircleElectrodeArray(ElectrodeArray):
def __init__(self, n_electrodes, radius, x_center, y_center):
"""Electrodes arranged in a circle
Electrodes will be named 'A0', 'A1', ...
Parameters
----------
n_electrodes : int
how many electrodes to arrange in a circle
radius : float
the radius of the circle (microns)
x_center, y_center : float
the x,y coordinates of the center of the circle (microns),
where (0,0) is the center of the fovea
"""
# The job of the constructor is to create the electrodes. We start
# with an empty collection:
self._electrodes = coll.OrderedDict()
# We then generate a number `n_electrodes` of electrodes, arranged on
# the circumference of a circle:
for n in range(n_electrodes):
# Angular position of the electrode:
ang = 2.0 * np.pi / n_electrodes * n
# Create the disk electrode:
electrode = DiskElectrode(x_center + np.cos(ang) * radius,
y_center + np.sin(ang) * radius, 0, 100)
# Add the electrode to the collection:
self.add_electrode('A' + str(n), electrode)
##############################################################################
# Using the CircleElectrodeArray class
# ------------------------------------
#
# To use the new class, we need to specify all input arguments and pass them
# to the constructor:
n_electrodes = 10
radius = 1000 # radius in microns
x_center = 0 # x-coordinate of circle center (microns)
y_center = 0 # y-coordinate of circle center (microns)
# Create a new instance of type CircleElectrodeArray:
earray = CircleElectrodeArray(n_electrodes, radius, x_center, y_center)
print(earray)
##############################################################################
# Individual electrodes can be accessed by their name or integer index:
earray[0]
earray['A0']
earray[0] == earray['A0']
##############################################################################
# Visualizing the electrode array
# -------------------------------
#
# Electrode arrays come with their own plotting method:
earray.plot()
##############################################################################
# By default, the method will use the current Axes object or create a new one
# if none exists. Alternatively, you can specify ``ax=`` yourself.
#
# Extending the CircleElectrodeArray class
# ----------------------------------------
#
# Similar to extending :py:class:`~pulse2percept.implants.ElectrodeArray` for
# our purposes, we can extend ``CircleElectrodeArray``.
#
# To add new functionality, we could simply edit the above constructor.
# However, nobody stops us from creating our own hierarchy of classes.
#
# For example, we could build a ``FlexibleCircleElectrodeArray`` that allows us
# to remove individual electrodes from the array:
class FlexibleCircleElectrodeArray(CircleElectrodeArray):
def remove(self, name):
"""Deletean electrode from the array
Parameters
----------
name : int, string
the name of the electrode to be removed
"""
del self.electrodes[name]
##############################################################################
# Note how we didn't even specify a constructor.
# By default, the class inherits all (public) functionality from its parent,
# including its constructor. So the following line will create the same
# electrode array as above:
flex_earray = FlexibleCircleElectrodeArray(
n_electrodes, radius, x_center, y_center)
print(flex_earray)
##############################################################################
# A single electrode can be removed by passing its name to the ``remove``
# method:
# Remove electrode 'A1'
flex_earray.remove('A1')
# Replot the implant:
flex_earray.plot()
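##############################################################################
# As a quick check (an illustrative addition, not part of the original
# example), printing the array again should now list one electrode fewer:
print(flex_earray)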
| bsd-3-clause | 1,638,808,666,672,022,800 | 35.765714 | 84 | 0.614081 | false |
asterix135/whoshouldivotefor | explorer/migrations/0008_auto_20170627_0253.py | 1 | 1741 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-27 06:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('explorer', '0007_auto_20170626_0543'),
]
operations = [
migrations.CreateModel(
name='IssueCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.CharField(max_length=100, unique=True)),
],
),
migrations.AlterField(
model_name='answer',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='explorer.Question'),
),
migrations.AlterField(
model_name='poll',
name='election',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='polls', to='explorer.Election'),
),
migrations.RemoveField(
model_name='question',
name='poll',
),
migrations.AddField(
model_name='question',
name='poll',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='explorer.Poll'),
preserve_default=False,
),
migrations.AddField(
model_name='question',
name='category',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='explorer.IssueCategory'),
preserve_default=False,
),
]
| mit | 2,262,988,717,029,549,800 | 34.530612 | 138 | 0.589316 | false |
Geoportail-Luxembourg/geoportailv3 | geoportal/geoportailv3_geoportal/scripts/db2es.py | 1 | 3233 | # -*- coding: utf-8 -*-
from pyramid.paster import bootstrap
import psycopg2
from psycopg2.extras import DictCursor
import sys
import getopt
import json
from elasticsearch import helpers
from elasticsearch.helpers import BulkIndexError
from elasticsearch.exceptions import ConnectionTimeout
from geoportailv3_geoportal.lib.search import get_elasticsearch, get_index, \
ensure_index
"""
Utility functions for importing data into Elasticsearch from a database
"""
def get_cursor():
source_conf = {
'database': 'search',
'user': 'postgres',
'password': '',
'host': 'luigi11',
'port': '5432'
}
conn = psycopg2.connect(**source_conf)
cursor = conn.cursor(cursor_factory=DictCursor)
query = "Select *, ST_AsGeoJSON(ST_Transform(\"searchLayer\".geom,4326)) as geom_4326 \
from public.\"searchLayer\" ;"
cursor.execute(query)
return cursor
def update_document(index, type, obj_id, obj=None):
doc = {
"_index": index,
"_type": "poi",
"_id": obj_id,
}
doc['_source'] = {}
doc['_source']['ts'] = json.loads(obj['geom_4326'])
doc['_source']['object_id'] = obj_id
doc['_source']['fk'] = obj['fk']
doc['_source']['object_type'] = 'poi'
doc['_source']['layer_name'] = obj['type']
doc['_source']['label'] = obj['label']
doc['_source']['role_id'] = 1
doc['_source']['public'] = True
return doc
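# For illustration only (all values below are hypothetical), the returned bulk
# action has this shape:
#   {'_index': 'index_20180101', '_type': 'poi', '_id': 42,
#    '_source': {'ts': {...GeoJSON geometry...}, 'object_id': 42, 'fk': 7,
#                'object_type': 'poi', 'layer_name': 'address',
#                'label': 'Some street name', 'role_id': 1, 'public': True}}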
def statuslog(text):
sys.stdout.write(text)
sys.stdout.flush()
def main():
env = bootstrap('development.ini')
request = env['request']
try:
opts, args = getopt.getopt(sys.argv[1:], 'ri', ['reset', 'index'])
except getopt.GetoptError as err:
print(str(err))
sys.exit(2)
index = False
reset = False
for o, a in opts:
if o in ('-r', '--reset'):
statuslog('\rResetting Index')
reset = True
if o in ('-i', '--index'):
statuslog('\rChecking Index')
index = True
import time
index_name = get_index(request) + '_' + time.strftime("%Y%m%d")
ensure_index(get_elasticsearch(request), index_name, reset)
if index is True:
statuslog("\rCreating Database Query ")
c = get_cursor()
counter = 1
while True:
multiple = 250
results = c.fetchmany(multiple)
doc_list = []
for result in results:
doc = update_document(get_index(request),
'poi',
result['id'],
result)
doc_list.append(doc)
statuslog("\rIndexed Elements: %i" % int(counter))
counter = counter + 1
try:
helpers.bulk(client=get_elasticsearch(request),
actions=doc_list,
chunk_size=multiple,
raise_on_error=True)
except (BulkIndexError, ConnectionTimeout) as e:
print("\n {}".format(e))
if not results:
statuslog("\n")
break
if __name__ == '__main__':
main()
| mit | 1,212,416,682,987,955,200 | 28.390909 | 91 | 0.534488 | false |
dlsun/symbulate | symbulate/index_sets.py | 1 | 1353 | import numbers
class IndexSet(object):
def __init__(self):
return
def __getitem__(self, t):
if t in self:
return t
else:
raise KeyError("Time %.2f not in index set." % t)
def __contains__(self, value):
return False
def __eq__(self, other):
return type(other) == type(self)
class Reals(IndexSet):
def __init__(self):
return
def __contains__(self, value):
try:
return -float("inf") < value < float("inf")
except:
return False
class Naturals(IndexSet):
def __init__(self):
return
def __contains__(self, value):
try:
return (
value >= 0 and
(isinstance(value, numbers.Integral) or
value.is_integer())
)
except:
return False
class DiscreteTimeSequence(IndexSet):
def __init__(self, fs):
self.fs = fs
def __getitem__(self, n):
return n / self.fs
def __contains__(self, value):
return float(value * self.fs).is_integer()
def __eq__(self, index):
return (
isinstance(index, DiscreteTimeSequence) and
(self.fs == index.fs)
)
class Integers(DiscreteTimeSequence):
def __init__(self):
self.fs = 1
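# Illustrative usage (not part of the original module):
#   seq = DiscreteTimeSequence(8000)   # discrete times sampled at 8 kHz
#   seq[4]             # -> 0.0005, i.e. index n maps to time n / fs
#   0.0005 in seq      # -> True
#   3 in Naturals()    # -> True
#   -2.5 in Reals()    # -> True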
| mit | 5,717,099,780,918,022,000 | 18.328571 | 61 | 0.503326 | false |
regisf/Strawberry | strawberrylib/blueprints/__init__.py | 1 | 1171 | # -*- coding: utf-8 -*-
# Strawberry Blog Engine
#
# Copyright (c) 2014 Regis FLORET
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = 'Regis FLORET'
| mit | -8,769,378,438,249,852,000 | 47.791667 | 80 | 0.766866 | false |
MungoRae/home-assistant | homeassistant/components/ecobee.py | 1 | 3654 | """
Support for Ecobee.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/ecobee/
"""
import logging
import os
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import discovery
from homeassistant.const import CONF_API_KEY
from homeassistant.util import Throttle
REQUIREMENTS = ['python-ecobee-api==0.0.7']
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
CONF_HOLD_TEMP = 'hold_temp'
DOMAIN = 'ecobee'
ECOBEE_CONFIG_FILE = 'ecobee.conf'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=180)
NETWORK = None
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_API_KEY): cv.string,
vol.Optional(CONF_HOLD_TEMP, default=False): cv.boolean
})
}, extra=vol.ALLOW_EXTRA)
def request_configuration(network, hass, config):
"""Request configuration steps from the user."""
configurator = hass.components.configurator
if 'ecobee' in _CONFIGURING:
configurator.notify_errors(
_CONFIGURING['ecobee'], "Failed to register, please try again.")
return
# pylint: disable=unused-argument
def ecobee_configuration_callback(callback_data):
"""Handle configuration callbacks."""
network.request_tokens()
network.update()
setup_ecobee(hass, network, config)
_CONFIGURING['ecobee'] = configurator.request_config(
"Ecobee", ecobee_configuration_callback,
description=(
'Please authorize this app at https://www.ecobee.com/consumer'
'portal/index.html with pin code: ' + network.pin),
description_image="/static/images/config_ecobee_thermostat.png",
submit_caption="I have authorized the app."
)
def setup_ecobee(hass, network, config):
"""Set up the Ecobee thermostat."""
# If ecobee has a PIN then it needs to be configured.
if network.pin is not None:
request_configuration(network, hass, config)
return
if 'ecobee' in _CONFIGURING:
configurator = hass.components.configurator
configurator.request_done(_CONFIGURING.pop('ecobee'))
hold_temp = config[DOMAIN].get(CONF_HOLD_TEMP)
discovery.load_platform(
hass, 'climate', DOMAIN, {'hold_temp': hold_temp}, config)
discovery.load_platform(hass, 'sensor', DOMAIN, {}, config)
discovery.load_platform(hass, 'binary_sensor', DOMAIN, {}, config)
class EcobeeData(object):
"""Get the latest data and update the states."""
def __init__(self, config_file):
"""Init the Ecobee data object."""
from pyecobee import Ecobee
self.ecobee = Ecobee(config_file)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from pyecobee."""
self.ecobee.update()
_LOGGER.info("Ecobee data updated successfully")
def setup(hass, config):
"""Set up the Ecobee.
Will automatically load thermostat and sensor components to support
devices discovered on the network.
"""
# pylint: disable=global-statement, import-error
global NETWORK
if 'ecobee' in _CONFIGURING:
return
from pyecobee import config_from_file
# Create ecobee.conf if it doesn't exist
if not os.path.isfile(hass.config.path(ECOBEE_CONFIG_FILE)):
jsonconfig = {"API_KEY": config[DOMAIN].get(CONF_API_KEY)}
config_from_file(hass.config.path(ECOBEE_CONFIG_FILE), jsonconfig)
NETWORK = EcobeeData(hass.config.path(ECOBEE_CONFIG_FILE))
setup_ecobee(hass, NETWORK.ecobee, config)
return True
| apache-2.0 | 4,711,755,110,755,043,000 | 28.467742 | 76 | 0.681445 | false |
sfischer13/python-arpa | tests/test_model_base.py | 1 | 2958 | import arpa
from arpa.models.base import ARPAModel
from arpa.models.simple import ARPAModelSimple
import pytest
from test_arpa import PARSERS
from test_arpa import TEST_ARPA
def test_manual_log_p_unk():
lm = arpa.loadf(TEST_ARPA)[0]
assert lm.log_p('UnladenSwallow') == -1.995635
def test_manual_p():
lm = arpa.loadf(TEST_ARPA)[0]
assert round(lm.p('<s>'), 4) == 0
def test_manual_contains():
lm = arpa.loadf(TEST_ARPA)[0]
assert 'foo' in lm
with pytest.raises(ValueError):
assert ('foo', ) in lm
with pytest.raises(ValueError):
assert 'a little' in lm
with pytest.raises(ValueError):
assert ('a', 'little') in lm
def test_new_model_contains_not():
lm = ARPAModelSimple()
assert 'foo' not in lm
with pytest.raises(ValueError):
assert ('foo', ) not in lm
with pytest.raises(ValueError):
assert 'a little' not in lm
with pytest.raises(ValueError):
assert ('a', 'little') not in lm
def test_new_model_counts():
lm = ARPAModelSimple()
assert lm.counts() == []
def test_new_model_len():
lm = ARPAModelSimple()
assert len(lm) == 0
def test_log_p_raw():
lm = ARPAModelSimple()
with pytest.raises(KeyError):
lm.log_p_raw('UnladenSwallow')
def test_log_p_empty_string():
lm = ARPAModelSimple()
with pytest.raises(ValueError):
lm.log_p('')
def test_log_p_empty_tuple():
lm = ARPAModelSimple()
with pytest.raises(ValueError):
lm.log_p(tuple())
def test_log_p_int():
lm = ARPAModelSimple()
with pytest.raises(ValueError):
lm.log_p(1)
def test_log_s_int():
lm = ARPAModelSimple()
with pytest.raises(ValueError):
lm.log_s(1)
def test_input_equality():
lm = ARPAModelSimple()
with pytest.raises(KeyError):
assert lm.p('foo') == lm.p(('foo', ))
with pytest.raises(KeyError):
assert lm.p('xxx') == lm.p(('xxx', ))
with pytest.raises(KeyError):
assert lm.p('a little') == lm.p(('a', 'little'))
with pytest.raises(KeyError):
assert lm.p('xxx little') == lm.p(('xxx', 'little'))
lm = arpa.loadf(TEST_ARPA)[0]
assert lm.p('foo') == lm.p(('foo', ))
assert lm.p('xxx') == lm.p(('xxx', ))
assert lm.p('a little') == lm.p(('a', 'little'))
assert lm.p('xxx little') == lm.p(('xxx', 'little'))
def test_check_input_list():
result = ARPAModel._check_input(['foo', 'bar'])
assert isinstance(result, tuple)
def test_check_input_string_word():
result = ARPAModel._check_input('foo')
assert isinstance(result, tuple) and len(result) == 1
def test_check_input_string_words():
result = ARPAModel._check_input('foo bar')
assert isinstance(result, tuple) and len(result) == 2
def test_new_model_order():
lm = ARPAModelSimple()
assert lm.order() is None
for p in PARSERS:
lm = arpa.loadf(TEST_ARPA, parser=p)[0]
assert lm.order() == 5
| mit | -8,238,129,910,528,234,000 | 23.245902 | 60 | 0.613252 | false |
MPBAUnofficial/cmsplugin_image_gallery | cmsplugin_image_gallery/models.py | 1 | 4086 | import threading
from cms.models import CMSPlugin
from django.db import models
from django.utils.translation import ugettext_lazy as _
from inline_ordering.models import Orderable
from filer.fields.image import FilerImageField
from django.core.exceptions import ValidationError
import utils
localdata = threading.local()
localdata.TEMPLATE_CHOICES = utils.autodiscover_templates()
TEMPLATE_CHOICES = localdata.TEMPLATE_CHOICES
class GalleryPlugin(CMSPlugin):
def copy_relations(self, oldinstance):
for img in oldinstance.image_set.all():
new_img = Image()
new_img.gallery=self
new_img.inline_ordering_position = img.inline_ordering_position
new_img.src = img.src
new_img.image_url = img.image_url
new_img.title = img.title
new_img.alt = img.alt
new_img.save()
template = models.CharField(max_length=255,
choices=TEMPLATE_CHOICES,
default='cmsplugin_gallery/gallery.html',
editable=len(TEMPLATE_CHOICES) > 1)
def __unicode__(self):
return _(u'%(count)d image(s) in gallery') % {'count': self.image_set.count()}
class Image(Orderable):
def get_media_path(self, filename):
pages = self.gallery.placeholder.page_set.all()
return pages[0].get_media_path(filename)
gallery = models.ForeignKey(
GalleryPlugin,
verbose_name=_("gallery")
)
src = FilerImageField(
null=True,
blank=True,
verbose_name=_("image")
)
image_url = models.URLField(
_("alternative image url"),
verify_exists=True,
null=True,
blank=True,
default=None
)
link_url = models.URLField(
_("link url"),
verify_exists=True,
null=True,
blank=True,
default=None,
help_text=_("url used when user click on the image")
)
src_height = models.PositiveSmallIntegerField(
_("image height"),
editable=False,
null=True
)
src_width = models.PositiveSmallIntegerField(
_("image width"),
editable=False,
null=True
)
title = models.CharField(
_("title"),
max_length=255,
blank=True
)
alt = models.CharField(
_("alt text"),
max_length=80,
blank=True
)
def clean(self):
if not self.src and not self.image_url:
raise ValidationError(_("Image not specified, use image or alternative url to specify the image source"))
def __unicode__(self):
return self.title or self.alt or str(self.pk)
#I don't know why, but inserting class Meta in Image causes the Orderable class field to not work,
#but this small hack solves the problem
Image._meta.get_field('inline_ordering_position').verbose_name = _("Inline ordering position")
Image._meta.verbose_name = _("Image")
Image._meta.verbose_name_plural = _("Images")
| bsd-2-clause | -7,195,980,818,986,026,000 | 37.186916 | 117 | 0.453255 | false |
eyzhou123/python-games | tetris.py | 1 | 10241 | #tetris.py
from Tkinter import *
import random
def tetrisMousePressed(canvas,event):
tetrisRedrawAll(canvas)
def tetrisKeyPressed(canvas,event):
if event.keysym == "r":
tetrisInit(canvas)
if (canvas.data.isTetrisGameOver == False):
if event.keysym == "Left":
moveFallingPiece(canvas,0,-1)
elif event.keysym == "Right":
moveFallingPiece(canvas,0,+1)
elif event.keysym == "Up":
rotateFallingPiece(canvas)
elif event.keysym == "Down":
moveFallingPiece(canvas,+1,0)
tetrisRedrawAll(canvas)
def tetrisTimerFired(canvas):
if (canvas.data.isTetrisGameOver == False):
        if moveFallingPiece(canvas,+1,0) == False:
placeFallingPiece(canvas)
newFallingPiece(canvas)
removeFullRows(canvas)
if (fallingPieceIsLegal(canvas) == False):
tetrisGameOver(canvas)
tetrisRedrawAll(canvas)
delay = 350 # milliseconds
def f():
tetrisTimerFired(canvas)
canvas.after(delay, f)# pause, then call timerFired again
def tetrisGameOver(canvas):
canvas.data.isTetrisGameOver = True
def tetrisRedrawAll(canvas):
canvas.delete(ALL)
drawTetrisGame(canvas)
drawTetrisScore(canvas)
if (canvas.data.isTetrisGameOver == True):
canvas.create_text(canvas.data.width/2,
canvas.data.height/2,text="Game Over!",font=("Helvetica",
32, "bold"))
def loadTetrisBoard(canvas):
(rows,cols) = (canvas.data.rows,canvas.data.cols)
canvas.data.tetrisBoard = [([canvas.data.emptyColor]*cols) for
row in xrange(rows)]
def drawTetrisGame(canvas):
canvas.create_rectangle(0,0,canvas.data.width,canvas.data.height,
fill = "orange")
drawTetrisBoard(canvas)
drawFallingPiece(canvas)
def drawTetrisBoard(canvas):
tetrisBoard = canvas.data.tetrisBoard
(rows,cols) = (len(tetrisBoard),len(tetrisBoard[0]))
for row in xrange(rows):
for col in xrange(cols):
color = tetrisBoard[row][col]
drawTetrisCell(canvas,row,col,color)
def drawTetrisCell(canvas,row,col,color):
tetrisBoard = canvas.data.tetrisBoard
margin = canvas.data.margin
cellSize = canvas.data.cellSize
left = margin + col * cellSize
right = left + cellSize
top = margin + row * cellSize
bottom = top + cellSize
canvas.create_rectangle(left, top, right, bottom,
fill = "black")
canvas.create_rectangle(left+1,top+1,right-1,bottom-1, #thin outline, use 1
fill = color)
def drawFallingPiece(canvas):
tetrisBoard = canvas.data.tetrisBoard
canvas.data.fallingPieceRows = len(canvas.data.fallingPiece)
canvas.data.fallingPieceCols = len(canvas.data.fallingPiece[0])
for row in xrange(canvas.data.fallingPieceRow,
canvas.data.fallingPieceRow + canvas.data.fallingPieceRows):
for col in xrange(canvas.data.fallingPieceCol,
canvas.data.fallingPieceCol + canvas.data.fallingPieceCols):
if (canvas.data.fallingPiece[row-canvas.data.fallingPieceRow
][col-canvas.data.fallingPieceCol] == True):
drawTetrisCell(canvas,row,col,canvas.data.fallingPieceColor)
def newFallingPiece(canvas):
i = random.randint(0,len(canvas.data.tetrisPieces)-1)
canvas.data.fallingPiece = canvas.data.tetrisPieces[i]
canvas.data.fallingPieceColor = canvas.data.tetrisPieceColors[i]
canvas.data.fallingPieceRow = 0
canvas.data.fallingPieceCol = (canvas.data.cols/2 -
canvas.data.fallingPieceWidth/2)
def moveFallingPiece(canvas,drow,dcol):
canvas.data.fallingPieceRow += drow
canvas.data.fallingPieceCol += dcol
if (fallingPieceIsLegal(canvas) == False):
canvas.data.fallingPieceRow -= drow
canvas.data.fallingPieceCol -= dcol
return False
return True
def rotateFallingPiece(canvas):
fallingPiece = canvas.data.fallingPiece
(fallingPieceRow,fallingPieceCol) = (canvas.data.fallingPieceRow,
canvas.data.fallingPieceCol)
(fallingPieceRows,fallingPieceCols) = (canvas.data.fallingPieceRows,
canvas.data.fallingPieceCols)
(oldCenterRow,oldCenterCol) = fallingPieceCenter(canvas)
(canvas.data.fallingPieceRows,canvas.data.fallingPieceCols) = (
canvas.data.fallingPieceCols,canvas.data.fallingPieceRows)
(newCenterRow,newCenterCol) = fallingPieceCenter(canvas)
canvas.data.fallingPieceRow +=oldCenterRow - newCenterRow
canvas.data.fallingPieceCol += oldCenterCol - newCenterCol
newCols = []
newList = []
for row in xrange(canvas.data.fallingPieceRows):
newCols = []
for col in xrange(canvas.data.fallingPieceCols):
newCols += [canvas.data.fallingPiece[
canvas.data.fallingPieceCols-1-col][row]]
newList += [newCols]
canvas.data.fallingPiece = newList
if (fallingPieceIsLegal(canvas) == False):
canvas.data.fallingPieceRow = fallingPieceRow
canvas.data.fallingPieceCol = fallingPieceCol
canvas.data.fallingPieceRows = fallingPieceRows
canvas.data.fallingPieceCols = fallingPieceCols
canvas.data.fallingPiece = fallingPiece
def fallingPieceCenter(canvas):
centerRow = canvas.data.fallingPieceRow + canvas.data.fallingPieceRows/2
centerCol = canvas.data.fallingPieceCol + canvas.data.fallingPieceCols/2
return (centerRow,centerCol)
def fallingPieceIsLegal(canvas):
tetrisBoard = canvas.data.tetrisBoard
canvas.data.fallingPieceRows = len(canvas.data.fallingPiece)
canvas.data.fallingPieceCols = len(canvas.data.fallingPiece[0])
for row in xrange(canvas.data.fallingPieceRow,
canvas.data.fallingPieceRow + canvas.data.fallingPieceRows):
for col in xrange(canvas.data.fallingPieceCol,
canvas.data.fallingPieceCol + canvas.data.fallingPieceCols):
if (canvas.data.fallingPiece[row-canvas.data.fallingPieceRow
][col-canvas.data.fallingPieceCol] == True):
if ((row<0) or (row >= canvas.data.rows) or (col<0) or
(col >= canvas.data.cols) or (tetrisBoard[row][col]!=
canvas.data.emptyColor)):
return False
return True
def placeFallingPiece(canvas):
tetrisBoard = canvas.data.tetrisBoard
canvas.data.fallingPieceRows = len(canvas.data.fallingPiece)
canvas.data.fallingPieceCols = len(canvas.data.fallingPiece[0])
for row in xrange(canvas.data.fallingPieceRow,
canvas.data.fallingPieceRow + canvas.data.fallingPieceRows):
for col in xrange(canvas.data.fallingPieceCol,
canvas.data.fallingPieceCol + canvas.data.fallingPieceCols):
if (canvas.data.fallingPiece[row-canvas.data.fallingPieceRow
][col-canvas.data.fallingPieceCol] == True):
tetrisBoard[row][col] = canvas.data.fallingPieceColor
def removeFullRows(canvas):
tetrisBoard = canvas.data.tetrisBoard
fullRows = 0
newRow = canvas.data.rows-1
for oldRow in xrange(canvas.data.rows-1,-1,-1):
if (canvas.data.emptyColor in tetrisBoard[oldRow]):
for col in xrange(canvas.data.cols):
tetrisBoard[newRow][col] = tetrisBoard[oldRow][col]
newRow -= 1
else:
fullRows += 1
    for row in xrange(newRow+1):
        for col in xrange(canvas.data.cols):
            tetrisBoard[row][col] = canvas.data.emptyColor
    canvas.data.score += fullRows**2
def drawTetrisScore(canvas):
canvas.create_text(canvas.data.cellSize,canvas.data.cellSize/2,
text="Score: " + str(canvas.data.score),anchor=W,
font=("Helvetica",16, "bold"))
def tetrisInit(canvas):
canvas.data.emptyColor = "blue"
loadTetrisBoard(canvas)
canvas.data.iPiece = [
[ True, True, True, True]
]
canvas.data.jPiece = [
[ True, False, False ],
[ True, True, True]
]
canvas.data.lPiece = [
[ False, False, True],
[ True, True, True]
]
canvas.data.oPiece = [
[ True, True],
[ True, True]
]
canvas.data.sPiece = [
[ False, True, True],
[ True, True, False ]
]
canvas.data.tPiece = [
[ False, True, False ],
[ True, True, True]
]
canvas.data.zPiece = [
[ True, True, False ],
[ False, True, True]
]
canvas.data.tetrisPieces = [canvas.data.iPiece, canvas.data.jPiece,
canvas.data.lPiece, canvas.data.oPiece,canvas.data.sPiece,
canvas.data.tPiece, canvas.data.zPiece ]
canvas.data.tetrisPieceColors = [ "red", "yellow", "magenta",
"pink", "cyan", "green", "orange" ]
canvas.data.fallingPiece = canvas.data.tetrisPieces[
random.randint(0,len(canvas.data.tetrisPieces)-1)]
canvas.data.fallingPieceColor = canvas.data.tetrisPieceColors[
canvas.data.tetrisPieces.index(canvas.data.fallingPiece)]
canvas.data.fallingPieceRow = 0
canvas.data.fallingPieceWidth = len(canvas.data.fallingPiece[0])
canvas.data.fallingPieceCol = (canvas.data.cols/2 -
canvas.data.fallingPieceWidth/2)
canvas.data.fallingPieceRows = len(canvas.data.fallingPiece)
canvas.data.fallingPieceCols = len(canvas.data.fallingPiece[0])
canvas.data.isTetrisGameOver = False
canvas.data.score = 0
tetrisRedrawAll(canvas)
def tetrisRun(rows,cols):
# create the root and the canvas
root = Tk()
margin = 30
cellSize = 30
canvasWidth = 2*margin + cols*cellSize
canvasHeight = 2*margin + rows*cellSize
canvas = Canvas(root, width=canvasWidth, height=canvasHeight)
canvas.pack()
root.resizable(width=0, height=0)
# Set up canvas data and call init
class Struct: pass
canvas.data = Struct()
canvas.data.margin = margin
canvas.data.cellSize = cellSize
canvas.data.rows = rows
canvas.data.cols = cols
canvas.data.width = canvasWidth
canvas.data.height = canvasHeight
tetrisInit(canvas)
# set up events
def f(event): tetrisMousePressed(canvas, event)
root.bind("<Button-1>", f)
def g(event): tetrisKeyPressed(canvas, event)
root.bind("<Key>", g)
tetrisTimerFired(canvas)
# and launch the app
root.mainloop() # This call BLOCKS (so your program waits until you close the window!)
tetrisRun(15,10)
| mit | 5,614,054,969,684,136,000 | 36.375912 | 91 | 0.671224 | false |
KeserOner/where-artists-share | was/artists/models.py | 1 | 1280 | from django.contrib.auth.models import User
from django.db import models
from django.dispatch.dispatcher import receiver
class Artists(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
artist_image = models.ImageField(
verbose_name="Artist's profile image",
null=True,
blank=True,
unique=True,
upload_to="artist_image/",
)
artist_banner = models.ImageField(
verbose_name="Artist's banner",
unique=True,
null=True,
blank=True,
upload_to="artist_banner/",
)
    artist_bio = models.TextField(max_length=500, verbose_name="Artist's biography")
artist_signature = models.CharField(
max_length=70, verbose_name="Artist's signature"
)
artist_followed = models.ForeignKey(
"self",
on_delete=models.CASCADE,
related_name="artists_followed",
blank=True,
null=True,
)
def __str__(self):
return "Profil de %s" % self.user.username
@receiver(models.signals.pre_delete, sender=Artists)
def delete_images(sender, instance, **kwargs):
if instance.artist_image:
instance.artist_image.delete(False)
if instance.artist_banner:
instance.artist_banner.delete(False)
| mit | 6,011,123,594,670,129,000 | 25.666667 | 83 | 0.646094 | false |
zinid/mrim | src/protocol.py | 1 | 19357 | from mmptypes import *
import utils
import UserDict
import cStringIO
import socket
import struct
import email
from email.Utils import parsedate
wp_request = {}
wp_request_reversed = {}
for k,v in [(key, locals()[key]) for key in locals().keys() if key.startswith('MRIM_CS_WP_REQUEST_PARAM')]:
wp_request[v] = k
for k,v in wp_request.items():
wp_request_reversed[v] = k
del k,v
message_flags = tuple([v for k,v in locals().items() if k.startswith('MESSAGE_FLAG')])
class MMPParsingError(Exception):
def __init__(self, text, packet):
self.args = text,packet
self.text = text
self.packet = packet
def __str__(self):
return self.text
class MMPHeader(UserDict.UserDict):
def __init__(self,typ=0,dlen=0,seq=0,fromip='0.0.0.0',fromport='0',header=''):
UserDict.UserDict.__init__(self)
self.header = header
self.typ = typ
self.frmt = '5I4s4s16B'
if not self.header:
self['magic'] = CS_MAGIC
self['proto'] = PROTO_VERSION
self['seq'] = seq
self['msg'] = typ
self['from'] = fromip
self['fromport'] = fromport
self['dlen'] = dlen
self['reserved'] = tuple([0 for i in range(16)])
else:
try:
unpacked_header = struct.unpack(self.frmt, self.header)
except struct.error:
raise MMPParsingError("Can't unpack header", self.header)
self['magic'] = unpacked_header[0]
self['proto'] = unpacked_header[1]
self['seq'] = unpacked_header[2]
self['msg'] = unpacked_header[3]
self['dlen'] = unpacked_header[4]
self['from'] = socket.inet_ntoa(unpacked_header[5])
self['fromport'] = socket.inet_ntoa(unpacked_header[6])
self['reserved'] = unpacked_header[7:]
def __str__(self):
if not self.header:
try:
new_header = struct.pack(
self.frmt,
self['magic'],
self['proto'],
self['seq'],
self['msg'],
self['dlen'],
socket.inet_aton(self['from']),
socket.inet_aton(self['fromport']),
*self['reserved']
)
except (struct.error, KeyError):
raise MMPParsingError("Can't pack header", self)
return new_header
else:
return self.header
class MMPBody(UserDict.UserDict):
def __init__(self, typ=0, dict={}, body=''):
UserDict.UserDict.__init__(self)
self.dict = dict
self.body = body
self.typ = typ
if self.body:
self.io = cStringIO.StringIO(body)
self.str2dict(body)
elif self.dict:
self.io = cStringIO.StringIO()
self.update(dict)
def __str__(self):
if self.body:
return self.body
elif self.dict:
return self.dict2str(self.dict)
else:
return ''
def str2dict(self, body):
try:
return self._str2dict(body)
except struct.error:
raise MMPParsingError("Can't unpack body", body)
def dict2str(self, dict):
try:
return self._dict2str(dict)
except (struct.error, KeyError):
raise MMPParsingError("Can't pack body", dict)
def _str2dict(self, body):
if self.typ == MRIM_CS_HELLO_ACK:
self['ping_period'] = self._read_ul()
elif self.typ == MRIM_CS_LOGIN_REJ:
self['reason'] = self._read_lps()
elif self.typ == MRIM_CS_MESSAGE:
self['flags'] = self._read_ul()
self['to'] = self._read_lps()
self['message'] = self._read_lps()
			self['rtf-message'] = self._read_lps()
elif self.typ == MRIM_CS_MESSAGE_ACK:
self['msg_id'] = self._read_ul()
self['flags'] = self._read_ul()
self['from'] = self._read_lps()
self['message'] = self._read_lps()
try:
self['rtf-message'] = self._read_lps()
except struct.error:
self['rtf-message'] = ' '
elif self.typ == MRIM_CS_MESSAGE_RECV:
self['from'] = self._read_lps()
self['msg_id'] = self._read_ul()
elif self.typ == MRIM_CS_MESSAGE_STATUS:
self['status'] = self._read_ul()
elif self.typ == MRIM_CS_USER_STATUS:
self['status'] = self._read_ul()
self['user'] = self._read_lps()
elif self.typ == MRIM_CS_LOGOUT:
self['reason'] = self._read_ul()
elif self.typ == MRIM_CS_CONNECTION_PARAMS:
self['ping_period'] = self._read_ul()
elif self.typ == MRIM_CS_ADD_CONTACT:
self['flags'] = self._read_ul()
self['group_id'] = self._read_ul()
self['email'] = self._read_lps()
self['name'] = self._read_lps()
self['phones'] = self._read_ul()
self['text'] = self._read_lps()
elif self.typ == MRIM_CS_ADD_CONTACT_ACK:
self['status'] = self._read_ul()
current_position = self.io.tell()
next_char = self.io.read(1)
if next_char:
self.io.seek(current_position)
self['contact_id'] = self._read_ul()
else:
return
elif self.typ == MRIM_CS_MODIFY_CONTACT:
self['id'] = self._read_ul()
self['flags'] = self._read_ul()
self['group_id'] = self._read_ul()
self['contact'] = self._read_lps()
self['name'] = self._read_lps()
self['phones'] = self._read_lps()
elif self.typ == MRIM_CS_MODIFY_CONTACT_ACK:
self['status'] = self._read_ul()
elif self.typ == MRIM_CS_OFFLINE_MESSAGE_ACK:
self['uidl'] = self._read_uidl()
self['message'] = self._read_lps()
elif self.typ == MRIM_CS_DELETE_OFFLINE_MESSAGE:
self['uidl'] = self._read_uidl()
elif self.typ == MRIM_CS_AUTHORIZE:
self['user'] = self._read_lps()
elif self.typ == MRIM_CS_AUTHORIZE_ACK:
self['user'] = self._read_lps()
elif self.typ == MRIM_CS_CHANGE_STATUS:
self['status'] = self._read_ul()
elif self.typ == MRIM_CS_GET_MPOP_SESSION_ACK:
self['status'] = self._read_ul()
self['session'] = self._read_lps()
elif self.typ == MRIM_CS_WP_REQUEST:
current_position = self.io.tell()
while 1:
next_char = self.io.read(1)
if next_char:
self.io.seek(current_position)
field = self._read_ul()
self[field] = self._read_lps()
current_position = self.io.tell()
else:
break
elif self.typ == MRIM_CS_ANKETA_INFO:
self['status'] = self._read_ul()
self['fields_num'] = self._read_ul()
self['max_rows'] = self._read_ul()
self['server_time'] = self._read_ul()
self['fields'] = [self._read_lps() for i in range(self['fields_num'])]
self['values'] = []
current_position = self.io.tell()
while 1:
next_char = self.io.read(1)
if next_char:
self.io.seek(current_position)
self['values'].append(tuple([self._read_lps() for i in range(self['fields_num'])]))
current_position = self.io.tell()
else:
break
elif self.typ == MRIM_CS_MAILBOX_STATUS:
self['count'] = self._read_ul()
self['sender'] = self._read_lps()
self['subject'] = self._read_lps()
self['unix_time'] = self._read_ul()
self['key'] = self._read_ul()
elif self.typ == MRIM_CS_MAILBOX_STATUS_OLD:
self['status'] = self._read_ul()
elif self.typ == MRIM_CS_CONTACT_LIST2:
self['status'] = self._read_ul()
if self['status'] == GET_CONTACTS_OK:
self['groups_number'] = self._read_ul()
self['groups_mask'] = self._read_lps()
self['contacts_mask'] = self._read_lps()
self['groups'] = [
self._read_masked_field(self['groups_mask']) \
for i in range(self['groups_number'])
]
self['contacts'] = []
while 1:
current_position = self.io.tell()
next_char = self.io.read(1)
if next_char:
self.io.seek(current_position)
self['contacts'].append(
self._read_masked_field(self['contacts_mask'])
)
else:
break
else:
self['groups_number'] = 0
self['groups_mask'] = self['contacts_mask'] = ''
self['groups'] = self['contacts'] = []
elif self.typ == MRIM_CS_LOGIN2:
self['login'] = self._read_lps()
self['password'] = self._read_lps()
self['status'] = self._read_ul()
self['user_agent'] = self._read_lps()
elif self.typ == MRIM_CS_SMS:
self['UNKNOWN'] = self._read_ul()
self['number'] = self._read_lps()
self['text'] = self._read_lps()
elif self.typ == MRIM_CS_SMS_ACK:
self['status'] = self._read_ul()
elif self.typ == MRIM_CS_USER_INFO:
current_position = self.io.tell()
while 1:
next_char = self.io.read(1)
if next_char:
self.io.seek(current_position)
field = self._read_lps()
if field == 'MESSAGES.TOTAL':
self['total'] = int(self._read_lps())
elif field == 'MESSAGES.UNREAD':
self['unread'] = int(self._read_lps())
elif field == 'MRIM.NICKNAME':
self['nickname'] = self._read_lps()
else:
self[field] = self._read_lps()
current_position = self.io.tell()
else:
break
def _dict2str(self, dict):
self.io = cStringIO.StringIO()
if self.typ == MRIM_CS_HELLO_ACK:
self._write_ul(dict['ping_period'])
elif self.typ == MRIM_CS_LOGIN_REJ:
self._write_lps(dict['reason'])
elif self.typ == MRIM_CS_MESSAGE:
self._write_ul(dict['flags'])
self._write_lps(dict['to'])
self._write_lps(dict['message'])
self._write_lps(dict['rtf-message'])
elif self.typ == MRIM_CS_MESSAGE_ACK:
self._write_ul(dict['msg_id'])
self._write_ul(dict['flags'])
self._write_lps(dict['from'])
self._write_lps(dict['message'])
self._write_lps(dict['rtf-message'])
elif self.typ == MRIM_CS_MESSAGE_RECV:
self._write_lps(dict['from'])
self._write_ul(dict['msg_id'])
elif self.typ == MRIM_CS_MESSAGE_STATUS:
self._write_ul(dict['status'])
elif self.typ == MRIM_CS_USER_STATUS:
self._write_ul(dict['status'])
self._write_lps(dict['user'])
elif self.typ == MRIM_CS_LOGOUT:
self._write_ul(dict['reason'])
elif self.typ == MRIM_CS_CONNECTION_PARAMS:
self._write_ul(dict['ping_period'])
elif self.typ == MRIM_CS_ADD_CONTACT:
self._write_ul(dict['flags'])
self._write_ul(dict['group_id'])
self._write_lps(dict['email'])
self._write_lps(dict['name'])
self._write_lps(dict['phones'])
self._write_lps(dict['text'])
elif self.typ == MRIM_CS_ADD_CONTACT_ACK:
self._write_ul(dict['status'])
self._write_ul(dict['contact_id'])
elif self.typ == MRIM_CS_MODIFY_CONTACT:
self._write_ul(dict['id'])
self._write_ul(dict['flags'])
self._write_ul(dict['group_id'])
self._write_lps(dict['contact'])
self._write_lps(dict['name'])
self._write_lps(dict['phones'])
elif self.typ == MRIM_CS_MODIFY_CONTACT_ACK:
self._write_ul(dict['status'])
elif self.typ == MRIM_CS_OFFLINE_MESSAGE_ACK:
self._write_uidl(dict['uidl'])
self._write_lps(dict['message'])
elif self.typ == MRIM_CS_DELETE_OFFLINE_MESSAGE:
self._write_uidl(dict['uidl'])
elif self.typ == MRIM_CS_AUTHORIZE:
self._write_lps(dict['user'])
elif self.typ == MRIM_CS_AUTHORIZE_ACK:
self._write_lps(dict['user'])
elif self.typ == MRIM_CS_CHANGE_STATUS:
self._write_ul(dict['status'])
elif self.typ == MRIM_CS_GET_MPOP_SESSION_ACK:
self._write_ul(dict['status'])
self._write_lps(dict['session'])
elif self.typ == MRIM_CS_WP_REQUEST:
for k,v in [(p,s) for p,s in dict.items() if p != MRIM_CS_WP_REQUEST_PARAM_ONLINE]:
self._write_ul(k)
self._write_lps(v)
if dict.has_key(MRIM_CS_WP_REQUEST_PARAM_ONLINE):
self._write_ul(MRIM_CS_WP_REQUEST_PARAM_ONLINE)
self._write_lps(dict[MRIM_CS_WP_REQUEST_PARAM_ONLINE])
elif self.typ == MRIM_CS_ANKETA_INFO:
self._write_ul(dict['status'])
self._write_ul(dict['fields_num'])
self._write_ul(dict['max_rows'])
self._write_ul(dict['server_time'])
for field in dict['fields']:
self._write_lps(field)
for value in dict['values']:
self._write_lps(value)
elif self.typ == MRIM_CS_MAILBOX_STATUS:
self._write_ul(dict['status'])
elif self.typ == MRIM_CS_LOGIN2:
self._write_lps(dict['login'])
self._write_lps(dict['password'])
self._write_ul(dict['status'])
self._write_lps(dict['user_agent'])
elif self.typ == MRIM_CS_SMS:
self._write_ul(dict['UNKNOWN'])
self._write_lps(dict['number'])
self._write_lps(dict['text'])
self.io.seek(0)
return self.io.read()
def _read_ul(self):
return struct.unpack('I', self.io.read(4))[0]
def _read_lps(self):
return self.io.read(self._read_ul())
def _read_uidl(self):
return self.io.read(8)
def _write_ul(self, ul):
self.io.write(struct.pack('I', ul))
def _write_lps(self, lps):
self._write_ul(len(lps))
self.io.write(lps)
def _write_uidl(self, uidl):
self.io.write(uidl[:8])
def _read_masked_field(self, mask):
group = []
for i in range(len(mask)):
symbol = mask[i]
if symbol == 'u':
group.append(self._read_ul())
elif symbol == 's':
group.append(self._read_lps())
return tuple(group)
class MMPPacket:
def __init__(self,typ=0,seq=0,fromip='0.0.0.0',fromport='0',dict={},packet=''):
self.header = ''
self.body = ''
self.typ = typ
if packet:
raw_header = packet[:44]
try:
magic = struct.unpack('I', raw_header[:4])[0]
except:
magic = 0
if magic == CS_MAGIC:
self.header = MMPHeader(header=raw_header)
if self.header:
self.typ = self.header['msg']
dlen = self.header['dlen']
self.body = MMPBody(typ=self.typ,body=packet[44:44+dlen])
else:
self.body = MMPBody(self.typ,dict)
dlen = len(self.body.__str__())
self.header = MMPHeader(self.typ,dlen,seq,fromip,fromport)
self.setHeaderAttr('seq', utils.seq())
def __str__(self):
return self.header.__str__() + self.body.__str__()
def getRawVersion(self):
return self.header['proto']
def getVersion(self):
p = self.getRawVersion()
return '%s.%s' % (utils.get_proto_major(p), utils.get_proto_minor(p))
def getType(self):
return self.header['msg']
def getHeader(self):
return self.header
def getBody(self):
return self.body
def getBodyAttr(self, attr):
return self.body[attr]
def getHeaderAttr(self, attr):
return self.header[attr]
def setHeaderAttr(self, attr, val):
self.header[attr] = val
def setBodyAttr(self, attr, val):
self.body[attr] = val
self.body = MMPBody(self.getType(),dict=self.body)
self.setHeaderAttr('dlen', len(self.body.__str__()))
def setIp(self, ip):
self.setHeaderAttr('from', ip)
def setPort(self, port):
self.setHeaderAttr('fromport', port)
def setType(self, new_typ):
		self.setHeaderAttr('msg', new_typ)
def setId(self, _id):
self.setHeaderAttr('seq', _id)
def getId(self):
return self.getHeaderAttr('seq')
def setMsgId(self, msg_id):
self.setBodyAttr('msg_id', msg_id)
def getMsgId(self):
if self.getBody().has_key('msg_id'):
return self.getBodyAttr('msg_id')
class Message(MMPPacket):
def __init__(self,to='',body=' ',flags=[],payload=None):
if not payload:
d = {}
flags_sum = 0
for f in flags:
flags_sum += f
d['flags'] = flags_sum & MESSAGE_USERFLAGS_MASK
d['to'] = to
d['message'] = body
if MESSAGE_FLAG_RTF in flags:
d['rtf-message'] = utils.pack_rtf(body)
else:
d['rtf-message'] = ' '
MMPPacket.__init__(self,typ=MRIM_CS_MESSAGE,dict=d)
self.setHeaderAttr('seq', utils.seq())
else:
MMPPacket.__init__(self,typ=payload.getType(),dict=payload.getBody())
def getTo(self):
return self.getBodyAttr('to')
def getFrom(self):
return self.getBodyAttr('from')
def getBodyPayload(self):
return utils.win2str(self.getBodyAttr('message'))
def getFlags(self):
flag_code = self.getBodyAttr('flags')
flags = []
for f in message_flags:
x = flag_code & f
if x:
flags.append(x)
return flags
def hasFlag(self, flag):
return flag in self.getFlags()
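# Illustrative usage sketch (not part of the original module; the address below
# is made up): build an outgoing message packet and serialize it for the wire.
#   msg = Message(to='[email protected]', body='hello')
#   raw = str(msg)               # 44-byte MMP header followed by the packed body
#   echo = MMPPacket(packet=raw)
#   echo.getType() == MRIM_CS_MESSAGE   # -> True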
class OfflineMessage(UserDict.UserDict):
def __init__(self, data):
UserDict.UserDict.__init__(self)
self.msg = email.message_from_string(data)
self.boundary = self.msg['Boundary']
self.payload = self.msg.get_payload().split('--%s--' % self.boundary)
self['from'] = self.msg['From']
self['date'] = parsedate(self.msg['Date'])
self['subject'] = self.msg['Subject']
self['flags'] = eval('0x'+self.msg['X-MRIM-Flags'])
self['version'] = self.msg['Version']
self['message'] = utils.win2str(self.payload[0].strip())
self['rtf-message'] = self.payload[1].strip()
def buildMessage(self):
d = {
'msg_id':0,
'flags':self['flags'],
'from':self['from'],
'message':self.payload[0].strip(),
'rtf-message':self['rtf-message']
}
m = MMPPacket(typ=MRIM_CS_MESSAGE_ACK,dict=d)
return Message(payload=m)
def getUTCTime(self):
return utils.msk2utc(self['date'])
class Anketa(MMPPacket):
def __init__(self, data):
MMPPacket.__init__(self,packet=data)
def getStatus(self):
return self.getBodyAttr('status')
def getFields(self):
return self.getBodyAttr('fields')
def getVCards(self):
vcards = []
fields = self.getFields()
for card in self.getBodyAttr('values'):
card_dict = {}
for n in range(self.getBodyAttr('fields_num')):
card_dict[fields[n]] = utils.win2str(card[n])
vcards.append(card_dict)
return vcards
class ContactList:
def __init__(self, packet=None):
self.cids = {}
self.users = {}
self.group = {}
if packet:
self.packet = packet
self.users = self.getUsers()
self.groups = self.getGroups()
i = 0
for u in self.packet.getBodyAttr('contacts'):
_id = 20+i
if (u[0] & CONTACT_FLAG_SMS):
self.cids[u[6]] = _id
else:
self.cids[u[2]] = _id
i += 1
def getGroups(self):
d = {}
for g in self.packet.getBodyAttr('groups'):
d[g[0]] = {'name':utils.win2str(g[1])}
return d
def getUsers(self):
d = {}
for u in self.packet.getBodyAttr('contacts'):
contact = {
'flags':u[0],
'group':u[1],
'nick':utils.win2str(u[3]),
'server_flags':u[4],
'status':u[5],
'phones':u[6]
}
if (u[0] & CONTACT_FLAG_SMS):
d[u[6]] = contact
else:
d[u[2]] = contact
return d
def getEmails(self):
return self.users.keys()
def getUserFlags(self, mail):
return self.users[mail]['flags']
def isValidUser(self, mail):
return not (self.isIgnoredUser(mail) or self.isRemovedUser(mail) or self.isSMSNumber(mail))
def isIgnoredUser(self, mail):
flags = self.getUserFlags(mail)
return bool(flags & CONTACT_FLAG_IGNORE)
def isRemovedUser(self, mail):
flags = self.getUserFlags(mail)
return bool(flags & CONTACT_FLAG_REMOVED)
def isSMSNumber(self, phone):
return not utils.is_valid_email(phone)
def getUserId(self, mail):
return self.cids[mail]
def setUserId(self, mail, _id):
self.cids[mail] = _id
def getUserStatus(self, mail):
status = 1
if utils.is_valid_email(mail):
status = self.users[mail]['status']
return status
def setUserStatus(self, mail, status):
self.users[mail]['status'] = status
def getAuthFlag(self, mail):
return self.users[mail]['server_flags']
def setAuthFlag(self, mail, flag):
self.users[mail]['server_flags'] = flag
def isAuthorized(self, mail):
return not bool(self.getAuthFlag(mail) & 0x1)
def getUserGroup(self, mail):
return self.users[mail]['group']
def setUserGroup(self, mail, gid):
self.users[mail]['group'] = gid
def getUserNick(self, mail):
return self.users[mail]['nick']
def setUserNick(self, mail, nick):
self.users[mail]['nick'] = nick
def delUser(self, mail):
return self.users.pop(mail)
def delGroup(self, gid):
return self.groups.pop(gid)
def getGroupName(self, gid):
name = 'unknown'
try:
            name = self.groups[gid]['name']
except KeyError:
pass
return name
def setGroupName(self, gid, name):
        self.groups[gid] = {'name': name}
def getGroupMembers(self, gid):
members = []
for u in self.users:
if self.getUserGroup(u) == gid:
members.append(u)
return members
def getPhones(self, mail):
phones = self.users[mail]['phones']
if phones:
return phones.split(',')
else:
return []
def setPhones(self, mail, phones):
self.users[mail]['phones'] = ','.join(phones[:3])
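# Illustrative sketch of typical ContactList usage (the packet is assumed to be
# an already parsed contact-list reply from the server):
#   cl = ContactList(packet)
#   for mail in cl.getEmails():
#       if cl.isValidUser(mail):
#           print cl.getUserNick(mail), cl.getUserStatus(mail)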
| gpl-3.0 | -3,714,155,334,947,474,000 | 26.613409 | 107 | 0.631916 | false |
gw280/skia | tools/test_pictures.py | 1 | 6084 | '''
Compares the renderings of serialized SkPictures to expected images.
Launch with --help to see more information.
Copyright 2012 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
# common Python modules
import os
import optparse
import sys
import shutil
import tempfile
USAGE_STRING = 'Usage: %s input... expectedDir'
HELP_STRING = '''
Compares the renderings of serialized SkPicture files and directories specified
by input with the images in expectedDir. Note, files in directories are
expected to end with .skp.
'''
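# Example invocations (illustrative only; the paths below are hypothetical):
#   python test_pictures.py downloaded_skps/ expected_images/
#   python test_pictures.py skp_dir1/ skp_dir2/ expected_images/ --mode tile 256 256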
def RunCommand(command):
"""Run a command.
@param command the command as a single string
"""
print 'running command [%s]...' % command
os.system(command)
def FindPathToProgram(program):
"""Return path to an existing program binary, or raise an exception if we
cannot find one.
@param program the name of the program that is being looked for
"""
trunk_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir))
possible_paths = [os.path.join(trunk_path, 'out', 'Release', program),
os.path.join(trunk_path, 'out', 'Debug', program),
os.path.join(trunk_path, 'out', 'Release',
program + ".exe"),
os.path.join(trunk_path, 'out', 'Debug',
program + ".exe")]
for try_path in possible_paths:
if os.path.isfile(try_path):
return try_path
raise Exception('cannot find %s in paths %s; maybe you need to '
'build %s?' % (program, possible_paths, program))
def RenderImages(inputs, render_dir, options):
"""Renders the serialized SkPictures.
Uses the render_pictures program to do the rendering.
    @param inputs the location(s) to read the serialized SkPictures
@param render_dir the location to write out the rendered images
"""
renderer_path = FindPathToProgram('render_pictures')
inputs_as_string = " ".join(inputs)
command = '%s %s %s' % (renderer_path, inputs_as_string, render_dir)
if (options.mode is not None):
command += ' --mode %s' % ' '.join(options.mode)
if (options.device is not None):
command += ' --device %s' % options.device
RunCommand(command)
def DiffImages(expected_dir, comparison_dir, diff_dir):
"""Diffs the rendered SkPicture images with the baseline images.
Uses the skdiff program to do the diffing.
@param expected_dir the location of the baseline images.
    @param comparison_dir the location of the images to compare with the
baseline
@param diff_dir the location to write out the diff results
"""
skdiff_path = FindPathToProgram('skdiff')
RunCommand('%s %s %s %s %s' %
(skdiff_path, expected_dir, comparison_dir, diff_dir,
'--noprintdirs'))
def Cleanup(options, render_dir, diff_dir):
"""Deletes any temporary folders and files created.
    @param options the parsed options, used to check whether render_dir or
        diff_dir was set
@param render_dir the directory where the rendered images were written
@param diff_dir the directory where the diff results were written
"""
if (not options.render_dir):
if (os.path.isdir(render_dir)):
shutil.rmtree(render_dir)
if (not options.diff_dir):
if (os.path.isdir(diff_dir)):
shutil.rmtree(diff_dir)
def ModeParse(option, opt_str, value, parser):
"""Parses the --mode option of the commandline.
The --mode option will either take in three parameters (if tile or
pow2tile) or a single parameter (otherwise).
"""
result = [value]
if value == "tile":
if (len(parser.rargs) < 2):
            raise optparse.OptionValueError(("--mode tile missing width"
" and/or height parameters"))
result.extend(parser.rargs[:2])
del parser.rargs[:2]
elif value == "pow2tile":
if (len(parser.rargs) < 2):
            raise optparse.OptionValueError(("--mode pow2tile missing minWidth"
" and/or height parameters"))
result.extend(parser.rargs[:2])
del parser.rargs[:2]
setattr(parser.values, option.dest, result)
def Main(args):
"""Allow other scripts to call this script with fake command-line args.
    @param args The commandline argument list
"""
parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING)
parser.add_option('--render_dir', dest='render_dir',
help = ("specify the location to output the rendered files."
" Default is a temp directory."))
parser.add_option('--diff_dir', dest='diff_dir',
help = ("specify the location to output the diff files."
" Default is a temp directory."))
parser.add_option('--mode', dest='mode', type='string',
action="callback", callback=ModeParse,
help = ("specify how rendering is to be done."))
parser.add_option('--device', dest='device',
help = ("specify the device to render to."))
options, arguments = parser.parse_args(args)
if (len(arguments) < 3):
print("Expected at least one input and one ouput folder.")
parser.print_help()
sys.exit(-1)
inputs = arguments[1:-1]
expected_dir = arguments[-1]
if (options.render_dir):
render_dir = options.render_dir
else:
render_dir = tempfile.mkdtemp()
if (options.diff_dir):
diff_dir = options.diff_dir
else:
diff_dir = tempfile.mkdtemp()
try:
RenderImages(inputs, render_dir, options)
DiffImages(expected_dir, render_dir, diff_dir)
finally:
Cleanup(options, render_dir, diff_dir)
if __name__ == '__main__':
Main(sys.argv)
| bsd-3-clause | -5,432,955,374,800,524,000 | 33.179775 | 80 | 0.609796 | false |
cfhamlet/os-urlpattern | src/os_urlpattern/pattern_maker.py | 1 | 4225 | """Pattern clustering procedure APIs.
"""
from .compat import itervalues
from .config import get_default_config
from .definition import BasePattern
from .parse_utils import EMPTY_PARSED_PIECE, ParsedPiece
from .parser import fuzzy_digest, parse
from .pattern_cluster import cluster
from .piece_pattern_node import PiecePatternNode, build_from_parsed_pieces
from .utils import TreeNode, build_tree, dump_tree, pick
class PatternMaker(object):
"""Scaffold for simplifying clustering.
    After loading URLs, iterate over all sub-makers to cluster each one
    individually, or cluster them all at once by calling the make method.
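
    Example (illustrative sketch; the URLs below are hypothetical)::

        pm = PatternMaker()
        for url in ('http://example.com/a/1.html',
                    'http://example.com/a/2.html'):
            pm.load(url)
        for url_meta, clustered in pm.make(combine=True):
            ...  # walk the combined pattern tree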
"""
def __init__(self, config=None):
self._config = get_default_config() if config is None else config
self._makers = {}
@property
def makers(self):
"""iterable: For iterating all sub makers."""
return itervalues(self._makers)
def load(self, url, meta=None):
"""Load url and meta.
Args:
url (str): The URL to be loaded.
meta (object, optional): Defaults to None. Meta data will be
merged at each cluster and can be accessed by clustered
node's meta property.
Returns:
            tuple: 2-tuple, (node, is_new).
"""
url_meta, parsed_pieces = parse(url)
if not isinstance(parsed_pieces[0], ParsedPiece):
raise ValueError('Invalid URL')
sid = fuzzy_digest(url_meta, parsed_pieces)
if sid not in self._makers:
self._makers[sid] = Maker(url_meta, self._config)
return self._makers[sid].load(parsed_pieces, meta=meta)
def make(self, combine=False):
"""Iterate all sub makers, start clustering and yield clustered.
Args:
combine (bool, optional): Defaults to False. Combine the
same url_meta clusters into a patten tree.
Yields:
tuple: 2-tuple, (url_meta, clustered). The clustered is the
root of a clustered tree.
"""
for maker in self.makers:
for clustered in maker.make(combine):
yield maker.url_meta, clustered
class Maker(object):
"""Low-level APIs for clustering.
    This is intended to be used only for pieces that share the same fuzzy digest.
"""
def __init__(self, url_meta, config=None):
self._url_meta = url_meta
self._config = get_default_config() if config is None else config
self._root = PiecePatternNode((EMPTY_PARSED_PIECE, None))
@property
def url_meta(self):
"""URLMeta: The URLMeta object."""
return self._url_meta
def load(self, parsed_pieces, meta=None):
"""Load parsed pieces and meta.
Args:
parsed_pieces (list): The parsed pieces to be loaded.
meta (object, optional): Defaults to None. Meta data will be
merged at each cluster and can be accessed by clustered
node's meta property.
Returns:
            tuple: 2-tuple, (node, is_new).
"""
return build_from_parsed_pieces(self._root,
parsed_pieces,
meta=meta)
def _cluster(self):
for clustered in cluster(self._config,
self._url_meta,
self._root):
yield clustered
def _combine_clusters(self):
root = TreeNode(BasePattern.EMPTY)
for clustered in self._cluster():
nodes = pick(dump_tree(clustered))
build_tree(root, [(n.pattern, n.pattern)
for n in nodes[1:]], nodes[0].count)
yield root
def make(self, combine=False):
"""Start clustering and yield clustered.
Args:
combine (bool, optional): Defaults to False. Combine the
clusters into a patten tree.
Yields:
TreeNode: Root of the clustered tree. If combine=False yield
all clustered parsed piece trees otherwise yield a
combined pattern tree.
"""
if combine:
return self._combine_clusters()
return self._cluster()
| mit | -7,779,818,026,958,898,000 | 32.531746 | 74 | 0.586746 | false |
nosix/PyCraft | src/pycraft/service/composite/entity/monster.py | 1 | 1206 | # -*- coding: utf8 -*-
from pycraft.service.const import EntityType
from pycraft.service.primitive.geometry import Size
from .base import MobEntity
from .player import PlayerEntity
class MonsterEntity(MobEntity):
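    """Base class for hostile mobs: player entities are treated as hostile."""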
def has_hostile(self, entity):
return isinstance(entity, PlayerEntity)
class Zombie(MonsterEntity):
TYPE = EntityType.ZOMBIE
STRENGTH = 10
BODY_SIZE = Size(0.6, 0.6, 1.95)
VIEW_DISTANCE = 64
VIEW_ANGLE_H = 60
VIEW_ANGLE_V = 30
class Skeleton(MonsterEntity):
TYPE = EntityType.SKELTON
STRENGTH = 10
BODY_SIZE = Size(0.6, 0.6, 1.8)
VIEW_DISTANCE = 64
VIEW_ANGLE_H = 60
VIEW_ANGLE_V = 30
class Creeper(MonsterEntity):
TYPE = EntityType.CREEPER
STRENGTH = 10
BODY_SIZE = Size(0.6, 0.6, 1.8)
VIEW_DISTANCE = 64
VIEW_ANGLE_H = 60
VIEW_ANGLE_V = 30
class Spider(MonsterEntity):
TYPE = EntityType.SPIDER
STRENGTH = 8
BODY_SIZE = Size(1.4, 1.4, 0.9)
VIEW_DISTANCE = 32
def can_climb(self):
return True
class Enderman(MonsterEntity):
TYPE = EntityType.ENDERMAN
STRENGTH = 20
BODY_SIZE = Size(0.6, 0.6, 2.9)
VIEW_ANGLE_H = 90
VIEW_ANGLE_V = 10
| lgpl-3.0 | 2,602,586,687,267,260,000 | 18.451613 | 51 | 0.64262 | false |
diofant/diofant | diofant/core/function.py | 1 | 77992 | """
There are three types of functions implemented in Diofant:
1) defined functions (in the sense that they can be evaluated) like
exp or sin; they have a name and a body:
f = exp
2) undefined functions, which have a name but no body. Undefined
functions can be defined using a Function class as follows:
f = Function('f')
(the result will be a Function instance)
3) anonymous functions (or lambda functions), which have a body (defined
   with dummy variables) but no name:
f = Lambda(x, exp(x)*x)
f = Lambda((x, y), exp(x)*y)
Examples
========
>>> f(x)
f(x)
>>> print(repr(f(x).func))
Function('f')
>>> f(x).args
(x,)
"""
from __future__ import annotations
import collections
import inspect
import typing
import mpmath
import mpmath.libmp as mlib
from ..utilities import default_sort_key, ordered
from ..utilities.iterables import uniq
from .add import Add
from .assumptions import ManagedProperties
from .basic import Basic
from .cache import cacheit
from .compatibility import as_int, is_sequence, iterable
from .containers import Dict, Tuple
from .decorators import _sympifyit
from .evalf import PrecisionExhausted
from .evaluate import global_evaluate
from .expr import AtomicExpr, Expr
from .logic import fuzzy_and
from .numbers import Float, Integer, Rational, nan
from .operations import LatticeOp
from .rules import Transform
from .singleton import S
from .sympify import sympify
def _coeff_isneg(a):
"""Return True if the leading Number is negative.
Examples
========
>>> _coeff_isneg(-3*pi)
True
>>> _coeff_isneg(Integer(3))
False
>>> _coeff_isneg(-oo)
True
>>> _coeff_isneg(Symbol('n', negative=True)) # coeff is 1
False
"""
if a.is_Mul or a.is_MatMul:
a = a.args[0]
return a.is_Number and a.is_negative
class PoleError(Exception):
"""Raised when an expansion pole is encountered."""
class ArgumentIndexError(ValueError):
"""Raised when an invalid operation for positional argument happened."""
def __str__(self):
return ('Invalid operation with argument number %s for Function %s' %
(self.args[1], self.args[0]))
class FunctionClass(ManagedProperties):
"""
Base class for function classes. FunctionClass is a subclass of type.
Use Function('<function name>' [ , signature ]) to create
undefined function classes.
"""
def __init__(self, *args, **kwargs):
assert hasattr(self, 'eval')
evalargspec = inspect.getfullargspec(self.eval)
if evalargspec.varargs:
evalargs = None
else:
evalargs = len(evalargspec.args) - 1 # subtract 1 for cls
if evalargspec.defaults:
# if there are default args then they are optional; the
# fewest args will occur when all defaults are used and
# the most when none are used (i.e. all args are given)
evalargs = tuple(range(evalargs - len(evalargspec.defaults),
evalargs + 1))
# honor kwarg value or class-defined value before using
# the number of arguments in the eval function (if present)
nargs = kwargs.pop('nargs', self.__dict__.get('nargs', evalargs))
super().__init__(args, kwargs)
# Canonicalize nargs here; change to set in nargs.
if is_sequence(nargs):
if not nargs:
raise ValueError('Incorrectly specified nargs as %s' % str(nargs))
nargs = tuple(ordered(set(nargs)))
elif nargs is not None:
nargs = as_int(nargs),
self._nargs = nargs
@property
def __signature__(self):
"""
Allow inspect.signature to give a useful signature for
Function subclasses.
"""
# TODO: Look at nargs
return inspect.signature(self.eval)
@property
def nargs(self):
"""Return a set of the allowed number of arguments for the function.
Examples
========
If the function can take any number of arguments, the set of whole
numbers is returned:
>>> Function('f').nargs
Naturals0()
If the function was initialized to accept one or more arguments, a
corresponding set will be returned:
>>> Function('f', nargs=1).nargs
{1}
>>> Function('f', nargs=(2, 1)).nargs
{1, 2}
The undefined function, after application, also has the nargs
attribute; the actual number of arguments is always available by
checking the ``args`` attribute:
>>> f(1).nargs
Naturals0()
>>> len(f(1).args)
1
"""
from ..sets.sets import FiniteSet
# XXX it would be nice to handle this in __init__ but there are import
# problems with trying to import FiniteSet there
return FiniteSet(*self._nargs) if self._nargs else S.Naturals0
def __repr__(self):
if issubclass(self, AppliedUndef):
return f'Function({self.__name__!r})'
else:
return self.__name__
def __str__(self):
return self.__name__
class Application(Expr, metaclass=FunctionClass):
"""
Base class for applied functions.
Instances of Application represent the result of applying an application of
any type to any object.
"""
is_Function = True
@cacheit
def __new__(cls, *args, **options):
from ..sets.fancysets import Naturals0
from ..sets.sets import FiniteSet
args = list(map(sympify, args))
evaluate = options.pop('evaluate', global_evaluate[0])
# WildFunction (and anything else like it) may have nargs defined
# and we throw that value away here
options.pop('nargs', None)
if options:
raise ValueError(f'Unknown options: {options}')
if evaluate:
if nan in args:
return nan
evaluated = cls.eval(*args)
if evaluated is not None:
return evaluated
obj = super().__new__(cls, *args, **options)
# make nargs uniform here
try:
# things passing through here:
# - functions subclassed from Function (e.g. myfunc(1).nargs)
# - functions like cos(1).nargs
# - AppliedUndef with given nargs like Function('f', nargs=1)(1).nargs
# Canonicalize nargs here
if is_sequence(obj.nargs):
nargs = tuple(ordered(set(obj.nargs)))
elif obj.nargs is not None:
nargs = as_int(obj.nargs),
else:
nargs = None
except AttributeError:
# things passing through here:
# - WildFunction('f').nargs
# - AppliedUndef with no nargs like Function('f')(1).nargs
nargs = obj._nargs # note the underscore here
obj.nargs = FiniteSet(*nargs) if nargs else Naturals0()
return obj
@classmethod
def eval(cls, *args):
"""
Returns a canonical form of cls applied to arguments args.
The eval() method is called when the class cls is about to be
instantiated and it should return either some simplified instance
        (possibly of some other class), or if the class cls should be
unmodified, return None.
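
        A minimal sketch (``MyExp`` is a hypothetical function, not part of
        the library)::

            class MyExp(Function):
                @classmethod
                def eval(cls, arg):
                    if arg == 0:
                        return Integer(1)  # MyExp(0) simplifies to 1
                    # returning None (implicitly) keeps MyExp(arg) unevaluated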
"""
return
def _eval_subs(self, old, new):
if (old.is_Function and new.is_Function and old == self.func and
len(self.args) in new.nargs):
return new(*self.args)
class Function(Application, Expr):
"""Base class for applied mathematical functions.
It also serves as a constructor for undefined function classes.
Examples
========
First example shows how to use Function as a constructor for undefined
function classes:
>>> g = g(x)
>>> f
f
>>> f(x)
f(x)
>>> g
g(x)
>>> f(x).diff(x)
Derivative(f(x), x)
>>> g.diff(x)
Derivative(g(x), x)
In the following example Function is used as a base class for
``MyFunc`` that represents a mathematical function *MyFunc*. Suppose
that it is well known, that *MyFunc(0)* is *1* and *MyFunc* at infinity
goes to *0*, so we want those two simplifications to occur automatically.
Suppose also that *MyFunc(x)* is real exactly when *x* is real. Here is
an implementation that honours those requirements:
>>> class MyFunc(Function):
...
... @classmethod
... def eval(cls, x):
... if x.is_Number:
... if x == 0:
... return Integer(1)
... elif x is oo:
... return Integer(0)
...
... def _eval_is_real(self):
... return self.args[0].is_real
...
>>> MyFunc(0) + sin(0)
1
>>> MyFunc(oo)
0
>>> MyFunc(3.54).evalf() # Not yet implemented for MyFunc.
MyFunc(3.54)
>>> MyFunc(I).is_real
False
In order for ``MyFunc`` to become useful, several other methods would
need to be implemented. See source code of some of the already
implemented functions for more complete examples.
Also, if the function can take more than one argument, then ``nargs``
must be defined, e.g. if ``MyFunc`` can take one or two arguments
then,
>>> class MyFunc(Function):
... nargs = (1, 2)
...
>>>
"""
@property
def _diff_wrt(self):
"""Allow derivatives wrt functions.
Examples
========
>>> f(x)._diff_wrt
True
"""
return True
@cacheit
def __new__(cls, *args, **options):
# Handle calls like Function('f')
if cls is Function:
return UndefinedFunction(*args, **options)
n = len(args)
if n not in cls.nargs:
# XXX: exception message must be in exactly this format to
# make it work with NumPy's functions like vectorize(). See,
# for example, https://github.com/numpy/numpy/issues/1697.
# The ideal solution would be just to attach metadata to
# the exception and change NumPy to take advantage of this.
temp = ('%(name)s takes %(qual)s %(args)s '
'argument%(plural)s (%(given)s given)')
raise TypeError(temp % {
'name': cls,
'qual': 'exactly' if len(cls.nargs) == 1 else 'at least',
'args': min(cls.nargs),
'plural': 's'*(min(cls.nargs) != 1),
'given': n})
evaluate = options.get('evaluate', global_evaluate[0])
result = super().__new__(cls, *args, **options)
if not evaluate or not isinstance(result, cls):
return result
pr = max(cls._should_evalf(a) for a in result.args)
pr2 = min(cls._should_evalf(a) for a in result.args)
if pr2 > 0:
return result.evalf(mlib.libmpf.prec_to_dps(pr), strict=False)
return result
@classmethod
def _should_evalf(cls, arg):
"""
Decide if the function should automatically evalf().
By default (in this implementation), this happens if (and only if) the
ARG is a floating point number.
This function is used by __new__.
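
        For example (illustrative, not a doctest)::

            cos(Float(1))    # automatically evaluated to a Float
            cos(Integer(1))  # stays symbolic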
"""
if arg.is_Float:
return arg._prec
if not arg.is_Add:
return -1
re, im = arg.as_real_imag()
l = [a._prec for a in [re, im] if a.is_Float]
l.append(-1)
return max(l)
@classmethod
def class_key(cls):
"""Nice order of classes."""
from ..sets.fancysets import Naturals0
funcs = {
'log': 11,
'sin': 20,
'cos': 21,
'tan': 22,
'cot': 23,
'sinh': 30,
'cosh': 31,
'tanh': 32,
'coth': 33,
'conjugate': 40,
're': 41,
'im': 42,
'arg': 43,
}
name = cls.__name__
try:
i = funcs[name]
except KeyError:
i = 0 if isinstance(cls.nargs, Naturals0) else 10000
return 4, i, name
def _eval_evalf(self, prec):
# Lookup mpmath function based on name
try:
if isinstance(self.func, UndefinedFunction):
# Shouldn't lookup in mpmath but might have ._imp_
raise AttributeError
fname = self.func.__name__
if not hasattr(mpmath, fname):
from ..utilities.lambdify import MPMATH_TRANSLATIONS
fname = MPMATH_TRANSLATIONS[fname]
func = getattr(mpmath, fname)
except (AttributeError, KeyError):
try:
return Float(self._imp_(*[i.evalf(prec) for i in self.args]), prec)
except (AttributeError, TypeError, ValueError, PrecisionExhausted):
return
# Convert all args to mpf or mpc
# Convert the arguments to *higher* precision than requested for the
# final result.
# XXX + 5 is a guess, it is similar to what is used in evalf.py. Should
# we be more intelligent about it?
try:
args = [arg._to_mpmath(prec + 5) for arg in self.args]
except ValueError:
return
with mpmath.workprec(prec):
v = func(*args)
return Expr._from_mpmath(v, prec)
def _eval_derivative(self, s):
# f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)
i = 0
l = []
for a in self.args:
i += 1
da = a.diff(s)
if da == 0:
continue
try:
df = self.fdiff(i)
except ArgumentIndexError:
df = Function.fdiff(self, i)
l.append(df * da)
return Add(*l)
def _eval_is_commutative(self):
return fuzzy_and(a.is_commutative for a in self.args)
def as_base_exp(self):
"""Returns the method as the 2-tuple (base, exponent)."""
return self, Integer(1)
def _eval_aseries(self, n, args0, x, logx):
"""
Compute an asymptotic expansion around args0, in terms of self.args.
This function is only used internally by _eval_nseries and should not
be called directly; derived classes can overwrite this to implement
asymptotic expansions.
"""
from ..utilities.misc import filldedent
raise PoleError(filldedent("""
Asymptotic expansion of %s around %s is
not implemented.""" % (type(self), args0)))
def _eval_nseries(self, x, n, logx):
"""
This function does compute series for multivariate functions,
but the expansion is always in terms of *one* variable.
Examples
========
>>> atan2(x, y).series(x, n=2)
atan2(0, y) + x/y + O(x**2)
>>> atan2(x, y).series(y, n=2)
-y/x + atan2(x, 0) + O(y**2)
This function also computes asymptotic expansions, if necessary
and possible:
>>> loggamma(1/x)._eval_nseries(x, 0, None)
-1/x - log(x)/x + log(x)/2 + O(1)
"""
from ..series import Order
from ..sets.sets import FiniteSet
from .symbol import Dummy
args = self.args
args0 = [t.limit(x, 0) for t in args]
if any(isinstance(t, Expr) and t.is_finite is False for t in args0):
from .numbers import oo, zoo
# XXX could use t.as_leading_term(x) here but it's a little
# slower
a = [t.compute_leading_term(x, logx=logx) for t in args]
a0 = [t.limit(x, 0) for t in a]
if any(t.has(oo, -oo, zoo, nan) for t in a0):
return self._eval_aseries(n, args0, x, logx)
# Careful: the argument goes to oo, but only logarithmically so. We
# are supposed to do a power series expansion "around the
# logarithmic term". e.g.
# f(1+x+log(x))
# -> f(1+logx) + x*f'(1+logx) + O(x**2)
# where 'logx' is given in the argument
a = [t._eval_nseries(x, n, logx) for t in args]
z = [r - r0 for (r, r0) in zip(a, a0)]
p = [Dummy()]*len(z)
q = []
v = None
for ai, zi, pi in zip(a0, z, p):
if zi.has(x):
if v is not None:
raise NotImplementedError
q.append(ai + pi)
v = pi
else:
q.append(ai)
e1 = self.func(*q)
if v is None:
return e1
s = e1._eval_nseries(v, n, logx)
o = s.getO()
s = s.removeO()
return s.subs({v: zi}).expand() + Order(o.expr.subs({v: zi}), x)
if (self.func.nargs is S.Naturals0
or (self.func.nargs == FiniteSet(1) and args0[0])
or any(c > 1 for c in self.func.nargs)):
e = self
e1 = e.expand()
if e == e1:
# for example when e = sin(x+1) or e = sin(cos(x))
# let's try the general algorithm
term = e.subs({x: 0})
if term.is_finite is False:
raise PoleError(f'Cannot expand {self} around 0')
series = term
fact = Integer(1)
_x = Dummy('x', real=True, positive=True)
e = e.subs({x: _x})
for i in range(n - 1):
i += 1
fact *= Rational(i)
e = e.diff(_x)
subs = e.subs({_x: 0})
term = subs*(x**i)/fact
term = term.expand()
series += term
return series + Order(x**n, x)
return e1.nseries(x, n=n, logx=logx)
arg = self.args[0]
f_series = order = Integer(0)
i, terms = 0, []
while order == 0 or i <= n:
term = self.taylor_term(i, arg, *terms)
term = term.nseries(x, n=n, logx=logx)
terms.append(term)
if term:
f_series += term
order = Order(term, x)
i += 1
return f_series + order
def fdiff(self, argindex=1):
"""Returns the first derivative of the function."""
from .symbol import Dummy
if not (1 <= argindex <= len(self.args)):
raise ArgumentIndexError(self, argindex)
if self.args[argindex - 1].is_Symbol:
for i in range(len(self.args)):
if i == argindex - 1:
continue
# See issue sympy/sympy#8510
if self.args[argindex - 1] in self.args[i].free_symbols:
break
else:
return Derivative(self, self.args[argindex - 1], evaluate=False)
# See issue sympy/sympy#4624 and issue sympy/sympy#4719
# and issue sympy/sympy#5600
arg_dummy = Dummy(f'xi_{argindex:d}')
arg_dummy.dummy_index = hash(self.args[argindex - 1])
new_args = list(self.args)
new_args[argindex-1] = arg_dummy
return Subs(Derivative(self.func(*new_args), arg_dummy),
(arg_dummy, self.args[argindex - 1]))
def _eval_as_leading_term(self, x):
"""Stub that should be overridden by new Functions to return
the first non-zero term in a series if ever an x-dependent
argument whose leading term vanishes as x -> 0 might be encountered.
See, for example, cos._eval_as_leading_term.
"""
from ..series import Order
args = [a.as_leading_term(x) for a in self.args]
o = Order(1, x)
if any(x in a.free_symbols and o.contains(a) for a in args):
# Whereas x and any finite number are contained in O(1, x),
# expressions like 1/x are not. If any arg simplified to a
# vanishing expression as x -> 0 (like x or x**2, but not
# 3, 1/x, etc...) then the _eval_as_leading_term is needed
# to supply the first non-zero term of the series,
#
# e.g. expression leading term
# ---------- ------------
# cos(1/x) cos(1/x)
# cos(cos(x)) cos(1)
# cos(x) 1 <- _eval_as_leading_term needed
# sin(x) x <- _eval_as_leading_term needed
#
raise NotImplementedError(
f'{self.func} has no _eval_as_leading_term routine')
else:
return self.func(*args)
class AppliedUndef(Function):
"""
Base class for expressions resulting from the application of an undefined
function.
"""
def __new__(cls, *args, **options):
args = list(map(sympify, args))
obj = super().__new__(cls, *args, **options)
return obj
def _eval_as_leading_term(self, x):
return self
class UndefinedFunction(FunctionClass):
"""The (meta)class of undefined functions."""
def __new__(cls, name, **kwargs):
ret = type.__new__(cls, name, (AppliedUndef,), kwargs)
ret.__module__ = None
return ret
def __instancecheck__(self, instance):
return self in type(instance).__mro__
def __eq__(self, other):
return (isinstance(other, self.__class__) and
(self.class_key() == other.class_key()))
def __hash__(self):
return super().__hash__()
class WildFunction(Function, AtomicExpr):
"""
A WildFunction function matches any function (with its arguments).
Examples
========
>>> F = WildFunction('F')
>>> F.nargs
Naturals0()
>>> x.match(F)
>>> F.match(F)
{F_: F_}
>>> f(x).match(F)
{F_: f(x)}
>>> cos(x).match(F)
{F_: cos(x)}
>>> f(x, y).match(F)
{F_: f(x, y)}
To match functions with a given number of arguments, set ``nargs`` to the
desired value at instantiation:
>>> F = WildFunction('F', nargs=2)
>>> F.nargs
{2}
>>> f(x).match(F)
>>> f(x, y).match(F)
{F_: f(x, y)}
To match functions with a range of arguments, set ``nargs`` to a tuple
containing the desired number of arguments, e.g. if ``nargs = (1, 2)``
then functions with 1 or 2 arguments will be matched.
>>> F = WildFunction('F', nargs=(1, 2))
>>> F.nargs
{1, 2}
>>> f(x).match(F)
{F_: f(x)}
>>> f(x, y).match(F)
{F_: f(x, y)}
>>> f(x, y, 1).match(F)
"""
include: set[typing.Any] = set()
def __init__(self, name, **assumptions):
from ..sets.sets import FiniteSet, Set
self.name = name
nargs = assumptions.pop('nargs', S.Naturals0)
if not isinstance(nargs, Set):
# Canonicalize nargs here. See also FunctionClass.
if is_sequence(nargs):
nargs = tuple(ordered(set(nargs)))
else:
nargs = as_int(nargs),
nargs = FiniteSet(*nargs)
self.nargs = nargs
def _matches(self, expr, repl_dict={}):
"""Helper method for match()
See Also
========
diofant.core.basic.Basic.matches
"""
if not isinstance(expr, (AppliedUndef, Function)):
return
if len(expr.args) not in self.nargs:
return
repl_dict = repl_dict.copy()
repl_dict[self] = expr
return repl_dict
class Derivative(Expr):
"""
Carries out differentiation of the given expression with respect to symbols.
expr must define ._eval_derivative(symbol) method that returns
the differentiation result. This function only needs to consider the
non-trivial case where expr contains symbol and it should call the diff()
method internally (not _eval_derivative); Derivative should be the only
one to call _eval_derivative.
Simplification of high-order derivatives:
Because there can be a significant amount of simplification that can be
done when multiple differentiations are performed, results will be
automatically simplified in a fairly conservative fashion unless the
keyword ``simplify`` is set to False.
>>> e = sqrt((x + 1)**2 + x)
>>> diff(e, (x, 5), simplify=False).count_ops()
136
>>> diff(e, (x, 5)).count_ops()
30
Ordering of variables:
If evaluate is set to True and the expression can not be evaluated, the
list of differentiation symbols will be sorted, that is, the expression is
assumed to have continuous derivatives up to the order asked. This sorting
assumes that derivatives wrt Symbols commute, derivatives wrt non-Symbols
commute, but Symbol and non-Symbol derivatives don't commute with each
other.
Derivative wrt non-Symbols:
This class also allows derivatives wrt non-Symbols that have _diff_wrt
set to True, such as Function and Derivative. When a derivative wrt a non-
Symbol is attempted, the non-Symbol is temporarily converted to a Symbol
while the differentiation is performed.
Note that this may seem strange, that Derivative allows things like
f(g(x)).diff(g(x)), or even f(cos(x)).diff(cos(x)). The motivation for
allowing this syntax is to make it easier to work with variational calculus
(i.e., the Euler-Lagrange method). The best way to understand this is that
the action of derivative with respect to a non-Symbol is defined by the
above description: the object is substituted for a Symbol and the
derivative is taken with respect to that. This action is only allowed for
objects for which this can be done unambiguously, for example Function and
Derivative objects. Note that this leads to what may appear to be
mathematically inconsistent results. For example::
>>> (2*cos(x)).diff(cos(x))
2
>>> (2*sqrt(1 - sin(x)**2)).diff(cos(x))
0
This appears wrong because in fact 2*cos(x) and 2*sqrt(1 - sin(x)**2) are
identically equal. However this is the wrong way to think of this. Think
of it instead as if we have something like this::
>>> from diofant.abc import s
>>> def f(u):
... return 2*u
...
>>> def g(u):
... return 2*sqrt(1 - u**2)
...
>>> f(cos(x))
2*cos(x)
>>> g(sin(x))
2*sqrt(-sin(x)**2 + 1)
>>> f(c).diff(c)
2
>>> f(c).diff(c)
2
>>> g(s).diff(c)
0
>>> g(sin(x)).diff(cos(x))
0
Here, the Symbols c and s act just like the functions cos(x) and sin(x),
respectively. Think of 2*cos(x) as f(c).subs({c: cos(x)}) (or f(c) *at*
c = cos(x)) and 2*sqrt(1 - sin(x)**2) as g(s).subs({s: sin(x)}) (or g(s) *at*
s = sin(x)), where f(u) == 2*u and g(u) == 2*sqrt(1 - u**2). Here, we
define the function first and evaluate it at the function, but we can
actually unambiguously do this in reverse in Diofant, because
expr.subs({Function: Symbol}) is well-defined: just structurally replace the
function everywhere it appears in the expression.
This is the same notational convenience used in the Euler-Lagrange method
when one says F(t, f(t), f'(t)).diff(f(t)). What is actually meant is
that the expression in question is represented by some F(t, u, v) at u =
f(t) and v = f'(t), and F(t, f(t), f'(t)).diff(f(t)) simply means F(t, u,
v).diff(u) at u = f(t).
We do not allow derivatives to be taken with respect to expressions where this
is not so well defined. For example, we do not allow expr.diff(x*y)
because there are multiple ways of structurally defining where x*y appears
in an expression, some of which may surprise the reader (for example, a
very strict definition would have that (x*y*z).diff(x*y) == 0).
>>> (x*y*z).diff(x*y)
Traceback (most recent call last):
...
ValueError: Can't differentiate wrt the variable: x*y, 1
Note that this definition also fits in nicely with the definition of the
chain rule. Note how the chain rule in Diofant is defined using unevaluated
Subs objects::
>>> f, g = symbols('f g', cls=Function)
>>> f(2*g(x)).diff(x)
2*Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1, 2*g(x)))
>>> f(g(x)).diff(x)
Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1, g(x)))
Finally, note that, to be consistent with variational calculus, and to
ensure that the definition of substituting a Function for a Symbol in an
expression is well-defined, derivatives of functions are assumed to not be
related to the function. In other words, we have::
>>> diff(f(x), x).diff(f(x))
0
The same is true for derivatives of different orders::
>>> diff(f(x), (x, 2)).diff(diff(f(x), (x, 1)))
0
>>> diff(f(x), (x, 1)).diff(diff(f(x), (x, 2)))
0
Note, any class can allow derivatives to be taken with respect to itself.
Examples
========
Some basic examples:
>>> Derivative(x**2, x, evaluate=True)
2*x
>>> Derivative(Derivative(f(x, y), x), y)
Derivative(f(x, y), x, y)
>>> Derivative(f(x), (x, 3))
Derivative(f(x), x, x, x)
>>> Derivative(f(x, y), y, x, evaluate=True)
Derivative(f(x, y), x, y)
Now some derivatives wrt functions:
>>> Derivative(f(x)**2, f(x), evaluate=True)
2*f(x)
>>> Derivative(f(g(x)), x, evaluate=True)
Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1, g(x)))
"""
is_Derivative = True
@property
def _diff_wrt(self):
"""Allow derivatives wrt Derivatives if it contains a function.
Examples
========
>>> Derivative(f(x), x)._diff_wrt
True
>>> Derivative(x**2, x)._diff_wrt
False
"""
if self.expr.is_Function:
return True
else:
return False
def __new__(cls, expr, *args, **assumptions):
from .symbol import Dummy
expr = sympify(expr)
# There are no args, we differentiate wrt all of the free symbols
# in expr.
if not args:
variables = expr.free_symbols
args = tuple(variables)
if len(variables) != 1:
from ..utilities.misc import filldedent
raise ValueError(filldedent("""
The variable(s) of differentiation
must be supplied to differentiate %s""" % expr))
        # Standardize the args by sympifying them and appending a count of 1
        # when only a variable is given: diff(e, x) -> diff(e, (x, 1)).
args = list(sympify(args))
for i, a in enumerate(args):
if not isinstance(a, Tuple):
args[i] = (a, Integer(1))
variable_count = []
all_zero = True
for v, count in args:
if not v._diff_wrt:
from ..utilities.misc import filldedent
ordinal = 'st' if count == 1 else 'nd' if count == 2 else 'rd' if count == 3 else 'th'
raise ValueError(filldedent("""
Can\'t calculate %s%s derivative wrt %s.""" % (count, ordinal, v)))
if count:
if all_zero:
all_zero = False
variable_count.append(Tuple(v, count))
# We make a special case for 0th derivative, because there is no
# good way to unambiguously print this.
if all_zero:
return expr
# Pop evaluate because it is not really an assumption and we will need
# to track it carefully below.
evaluate = assumptions.pop('evaluate', False)
# Look for a quick exit if there are symbols that don't appear in
        # the expression at all. Note, this cannot check non-symbols like
# functions and Derivatives as those can be created by intermediate
# derivatives.
if evaluate:
symbol_set = {sc[0] for sc in variable_count if sc[0].is_Symbol}
if symbol_set.difference(expr.free_symbols):
return Integer(0)
# We make a generator so as to only generate a variable when necessary.
# If a high order of derivative is requested and the expr becomes 0
# after a few differentiations, then we won't need the other variables.
variablegen = (v for v, count in variable_count for i in range(count))
# If we can't compute the derivative of expr (but we wanted to) and
# expr is itself not a Derivative, finish building an unevaluated
# derivative class by calling Expr.__new__.
if (not (hasattr(expr, '_eval_derivative') and evaluate) and
(not isinstance(expr, Derivative))):
variables = list(variablegen)
# If we wanted to evaluate, we sort the variables into standard
# order for later comparisons. This is too aggressive if evaluate
# is False, so we don't do it in that case.
if evaluate:
# TODO: check if assumption of discontinuous derivatives exist
variables = cls._sort_variables(variables)
# Here we *don't* need to reinject evaluate into assumptions
# because we are done with it and it is not an assumption that
# Expr knows about.
obj = Expr.__new__(cls, expr, *variables, **assumptions)
return obj
# Compute the derivative now by repeatedly calling the
# _eval_derivative method of expr for each variable. When this method
# returns None, the derivative couldn't be computed wrt that variable
# and we save the variable for later.
unhandled_variables = []
        # Once we encounter a non_symbol that is unhandled, we stop taking
# derivatives entirely. This is because derivatives wrt functions
# don't commute with derivatives wrt symbols and we can't safely
# continue.
unhandled_non_symbol = False
nderivs = 0 # how many derivatives were performed
for v in variablegen:
is_symbol = v.is_Symbol
if unhandled_non_symbol:
obj = None
else:
if not is_symbol:
new_v = Dummy(f'xi_{i:d}')
new_v.dummy_index = hash(v)
expr = expr.xreplace({v: new_v})
old_v = v
v = new_v
obj = expr._eval_derivative(v)
nderivs += 1
if not is_symbol:
if obj is not None:
if obj.is_Derivative and not old_v.is_Symbol:
# Derivative evaluated at a generic point, i.e.
# that is not a symbol.
obj = Subs(obj, (v, old_v))
else:
obj = obj.xreplace({v: old_v})
v = old_v
if obj is None:
unhandled_variables.append(v)
if not is_symbol:
unhandled_non_symbol = True
elif obj == 0:
return Integer(0)
else:
expr = obj
if unhandled_variables:
unhandled_variables = cls._sort_variables(unhandled_variables)
expr = Expr.__new__(cls, expr, *unhandled_variables, **assumptions)
else:
# We got a Derivative at the end of it all, and we rebuild it by
# sorting its variables.
if isinstance(expr, Derivative):
expr = cls(
expr.expr, *cls._sort_variables(expr.variables)
)
if nderivs > 1 and assumptions.get('simplify', True):
from ..simplify.simplify import signsimp
from .exprtools import factor_terms
expr = factor_terms(signsimp(expr))
return expr
@classmethod
def _sort_variables(cls, vars):
"""Sort variables, but disallow sorting of non-symbols.
When taking derivatives, the following rules usually hold:
* Derivative wrt different symbols commute.
* Derivative wrt different non-symbols commute.
* Derivatives wrt symbols and non-symbols don't commute.
Examples
========
>>> vsort = Derivative._sort_variables
>>> vsort((x, y, z))
[x, y, z]
>>> vsort((h(x), g(x), f(x)))
[f(x), g(x), h(x)]
>>> vsort((z, y, x, h(x), g(x), f(x)))
[x, y, z, f(x), g(x), h(x)]
>>> vsort((x, f(x), y, f(y)))
[x, f(x), y, f(y)]
>>> vsort((y, x, g(x), f(x), z, h(x), y, x))
[x, y, f(x), g(x), z, h(x), x, y]
>>> vsort((z, y, f(x), x, f(x), g(x)))
[y, z, f(x), x, f(x), g(x)]
>>> vsort((z, y, f(x), x, f(x), g(x), z, z, y, x))
[y, z, f(x), x, f(x), g(x), x, y, z, z]
"""
sorted_vars = []
symbol_part = []
non_symbol_part = []
for v in vars:
if not v.is_Symbol:
if len(symbol_part) > 0:
sorted_vars.extend(sorted(symbol_part,
key=default_sort_key))
symbol_part = []
non_symbol_part.append(v)
else:
if len(non_symbol_part) > 0:
sorted_vars.extend(sorted(non_symbol_part,
key=default_sort_key))
non_symbol_part = []
symbol_part.append(v)
if len(non_symbol_part) > 0:
sorted_vars.extend(sorted(non_symbol_part,
key=default_sort_key))
if len(symbol_part) > 0:
sorted_vars.extend(sorted(symbol_part,
key=default_sort_key))
return sorted_vars
def _eval_is_commutative(self):
return self.expr.is_commutative
def _eval_derivative(self, v):
# If the variable s we are diff wrt is not in self.variables, we
# assume that we might be able to take the derivative.
if v not in self.variables:
obj = self.expr.diff(v)
if obj == 0:
return Integer(0)
if isinstance(obj, Derivative):
return obj.func(obj.expr, *(self.variables + obj.variables))
# The derivative wrt s could have simplified things such that the
# derivative wrt things in self.variables can now be done. Thus,
# we set evaluate=True to see if there are any other derivatives
# that can be done. The most common case is when obj is a simple
# number so that the derivative wrt anything else will vanish.
return self.func(obj, *self.variables, evaluate=True)
        # In this case s was in self.variables so the derivative wrt s has
# already been attempted and was not computed, either because it
# couldn't be or evaluate=False originally.
return self.func(self.expr, *(self.variables + (v, )), evaluate=False)
def doit(self, **hints):
"""Evaluate objects that are not evaluated by default.
See Also
========
diofant.core.basic.Basic.doit
"""
expr = self.expr
if hints.get('deep', True):
expr = expr.doit(**hints)
hints['evaluate'] = True
return self.func(expr, *self.variables, **hints)
@_sympifyit('z0', NotImplementedError)
def doit_numerically(self, z0):
"""
Evaluate the derivative at z numerically.
When we can represent derivatives at a point, this should be folded
into the normal evalf. For now, we need a special method.
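
        A minimal usage sketch (illustrative, not a doctest)::

            d = Derivative(exp(x), x)
            d.doit_numerically(Integer(1))  # numerically close to E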
"""
import mpmath
from .expr import Expr
if len(self.free_symbols) != 1 or len(self.variables) != 1:
raise NotImplementedError('partials and higher order derivatives')
z = list(self.free_symbols)[0]
def eval(x):
f0 = self.expr.subs({z: Expr._from_mpmath(x, prec=mpmath.mp.prec)})
f0 = f0.evalf(mlib.libmpf.prec_to_dps(mpmath.mp.prec), strict=False)
return f0._to_mpmath(mpmath.mp.prec)
return Expr._from_mpmath(mpmath.diff(eval,
z0._to_mpmath(mpmath.mp.prec)),
mpmath.mp.prec)
@property
def expr(self):
"""Return expression."""
return self.args[0]
@property
def variables(self):
"""Return tuple of symbols, wrt derivative is taken."""
return self.args[1:]
@property
def free_symbols(self):
"""Return from the atoms of self those which are free symbols.
See Also
========
diofant.core.basic.Basic.free_symbols
"""
return self.expr.free_symbols
def _eval_subs(self, old, new):
if old in self.variables and not new._diff_wrt:
# issue sympy/sympy#4719
return Subs(self, (old, new))
# If both are Derivatives with the same expr, check if old is
# equivalent to self or if old is a subderivative of self.
if old.is_Derivative and old.expr == self.expr:
            # Check if canonical order of variables is equal.
old_vars = collections.Counter(old.variables)
self_vars = collections.Counter(self.variables)
if old_vars == self_vars:
return new
# collections.Counter doesn't have __le__
def _subset(a, b):
return all(a[i] <= b[i] for i in a)
if _subset(old_vars, self_vars):
return Derivative(new, *(self_vars - old_vars).elements())
return Derivative(*(x._subs(old, new) for x in self.args))
def _eval_lseries(self, x, logx):
for term in self.expr.series(x, n=None, logx=logx):
yield self.func(term, *self.variables)
def _eval_nseries(self, x, n, logx):
arg = self.expr.nseries(x, n=n, logx=logx)
o = arg.getO()
rv = [self.func(a, *self.variables) for a in Add.make_args(arg.removeO())]
if o:
rv.append(o/x)
return Add(*rv)
def _eval_as_leading_term(self, x):
return self.func(self.expr.as_leading_term(x), *self.variables)
class Lambda(Expr):
"""
Lambda(x, expr) represents a lambda function similar to Python's
'lambda x: expr'. A function of several variables is written as
Lambda((x, y, ...), expr).
A simple example:
>>> f = Lambda(x, x**2)
>>> f(4)
16
For multivariate functions, use:
>>> f2 = Lambda((x, y, z, t), x + y**z + t**z)
>>> f2(1, 2, 3, 4)
73
A handy shortcut for lots of arguments:
>>> p = x, y, z
>>> f = Lambda(p, x + y*z)
>>> f(*p)
x + y*z
"""
is_Function = True
def __new__(cls, variables, expr):
from ..sets.sets import FiniteSet
v = list(variables) if iterable(variables) else [variables]
for i in v:
if not getattr(i, 'is_Symbol', False):
raise TypeError(f'variable is not a symbol: {i}')
if len(v) == 1 and v[0] == expr:
return S.IdentityFunction
obj = Expr.__new__(cls, Tuple(*v), sympify(expr))
obj.nargs = FiniteSet(len(v))
return obj
@property
def variables(self):
"""The variables used in the internal representation of the function."""
return self.args[0]
@property
def expr(self):
"""The return value of the function."""
return self.args[1]
@property
def free_symbols(self):
"""Return from the atoms of self those which are free symbols.
See Also
========
diofant.core.basic.Basic.free_symbols
"""
return self.expr.free_symbols - set(self.variables)
def __call__(self, *args):
n = len(args)
if n not in self.nargs: # Lambda only ever has 1 value in nargs
# XXX: exception message must be in exactly this format to
# make it work with NumPy's functions like vectorize(). See,
# for example, https://github.com/numpy/numpy/issues/1697.
# The ideal solution would be just to attach metadata to
# the exception and change NumPy to take advantage of this.
# XXX does this apply to Lambda? If not, remove this comment.
temp = ('%(name)s takes exactly %(args)s '
'argument%(plural)s (%(given)s given)')
raise TypeError(temp % {
'name': self,
'args': list(self.nargs)[0],
'plural': 's'*(list(self.nargs)[0] != 1),
'given': n})
return self.expr.xreplace(dict(zip(self.variables, args)))
def __eq__(self, other):
if not isinstance(other, Lambda):
return False
if self.nargs != other.nargs:
return False
selfexpr = self.args[1]
otherexpr = other.args[1]
otherexpr = otherexpr.xreplace(dict(zip(other.args[0], self.args[0])))
return selfexpr == otherexpr
def __hash__(self):
return super().__hash__()
def _hashable_content(self):
return self.expr.xreplace(self.canonical_variables),
class Subs(Expr):
"""
Represents unevaluated substitutions of an expression.
    ``Subs`` receives at least two arguments: an expression and one or more
    pairs of old and new expressions to substitute.
``Subs`` objects are generally useful to represent unevaluated derivatives
calculated at a point.
The variables may be expressions, but they are subjected to the limitations
of subs(), so it is usually a good practice to use only symbols for
variables, since in that case there can be no ambiguity.
There's no automatic expansion - use the method .doit() to effect all
possible substitutions of the object and also of objects inside the
expression.
When evaluating derivatives at a point that is not a symbol, a Subs object
is returned. One is also able to calculate derivatives of Subs objects - in
this case the expression is always expanded (for the unevaluated form, use
Derivative()).
Examples
========
>>> e = Subs(f(x).diff(x), (x, y))
>>> e.subs({y: 0})
Subs(Derivative(f(x), x), (x, 0))
>>> e.subs({f: sin}).doit()
cos(y)
>>> Subs(f(x)*sin(y) + z, (x, 0), (y, 1))
Subs(z + f(x)*sin(y), (x, 0), (y, 1))
>>> _.doit()
z + f(0)*sin(1)
"""
def __new__(cls, expr, *args, **assumptions):
from .symbol import Symbol
args = sympify(args)
if len(args) and all(is_sequence(_) and len(_) == 2 for _ in args):
variables, point = zip(*args)
else:
            raise ValueError('Subs supports two or more arguments')
if tuple(uniq(variables)) != variables:
repeated = [ v for v in set(variables) if variables.count(v) > 1 ]
raise ValueError('cannot substitute expressions %s more than '
'once.' % repeated)
expr = sympify(expr)
        # use symbols with names equal to the point value (with prepended _)
# to give a variable-independent expression
pre = '_'
pts = sorted(set(point), key=default_sort_key)
from ..printing import StrPrinter
class CustomStrPrinter(StrPrinter):
def _print_Dummy(self, expr):
return str(expr) + str(expr.dummy_index)
def mystr(expr, **settings):
p = CustomStrPrinter(settings)
return p.doprint(expr)
while 1:
s_pts = {p: Symbol(pre + mystr(p)) for p in pts}
reps = [(v, s_pts[p])
for v, p in zip(variables, point)]
            # if any underscore-prepended symbol is already a free symbol
# and is a variable with a different point value, then there
# is a clash, e.g. _0 clashes in Subs(_0 + _1, (_0, 1), (_1, 0))
# because the new symbol that would be created is _1 but _1
# is already mapped to 0 so __0 and __1 are used for the new
# symbols
if any(r in expr.free_symbols and
r in variables and
Symbol(pre + mystr(point[variables.index(r)])) != r
for _, r in reps):
pre += '_'
continue
reps # XXX "peephole" optimization, http://bugs.python.org/issue2506
break
obj = Expr.__new__(cls, expr, *sympify(tuple(zip(variables, point))))
obj._expr = expr.subs(reps)
return obj
def _eval_is_commutative(self):
return (self.expr.is_commutative and
all(p.is_commutative for p in self.point))
def doit(self, **hints):
"""Evaluate objects that are not evaluated by default.
See Also
========
diofant.core.basic.Basic.doit
"""
return self.expr.doit(**hints).subs(list(zip(self.variables, self.point)))
def evalf(self, dps=15, **options):
"""Evaluate the given formula to an accuracy of dps decimal digits.
See Also
========
diofant.core.evalf.EvalfMixin.evalf
"""
return self.doit().evalf(dps, **options)
#:
n = evalf
@property
def variables(self):
"""The variables to be evaluated."""
return Tuple(*tuple(zip(*self.args[1:])))[0]
@property
def expr(self):
"""The expression on which the substitution operates."""
return self.args[0]
@property
def point(self):
"""The values for which the variables are to be substituted."""
return Tuple(*tuple(zip(*self.args[1:])))[1]
@property
def free_symbols(self):
"""Return from the atoms of self those which are free symbols.
See Also
========
diofant.core.basic.Basic.free_symbols
"""
return (self.expr.free_symbols - set(self.variables) |
set(self.point.free_symbols))
def __eq__(self, other):
if not isinstance(other, Subs):
return False
return self._expr == other._expr
def __hash__(self):
return super().__hash__()
def _hashable_content(self):
return self._expr.xreplace(self.canonical_variables),
def _eval_subs(self, old, new):
if old in self.variables:
return self
if isinstance(old, Subs) and self.point == old.point:
if self.expr.subs(zip(self.variables, old.variables)) == old.expr:
return new
def _eval_derivative(self, s):
return Add((self.func(self.expr.diff(s), *self.args[1:]).doit()
if s not in self.variables else Integer(0)),
*[p.diff(s)*self.func(self.expr.diff(v), *self.args[1:]).doit()
for v, p in zip(self.variables, self.point)])
def diff(f, *args, **kwargs):
"""
Differentiate f with respect to symbols.
This is just a wrapper to unify .diff() and the Derivative class; its
interface is similar to that of integrate(). You can use the same
shortcuts for multiple variables as with Derivative. For example,
diff(f(x), x, x, x) and diff(f(x), (x, 3)) both return the third derivative
of f(x).
You can pass evaluate=False to get an unevaluated Derivative class. Note
    that if the order is 0 (such as diff(f(x), (x, 0))), then the result will
be the function (the zeroth derivative), even if evaluate=False.
Examples
========
>>> diff(sin(x), x)
cos(x)
>>> diff(f(x), x, x, x)
Derivative(f(x), x, x, x)
>>> diff(f(x), (x, 3))
Derivative(f(x), x, x, x)
>>> diff(sin(x)*cos(y), (x, 2), (y, 2))
sin(x)*cos(y)
>>> type(diff(sin(x), x))
cos
>>> type(diff(sin(x), x, evaluate=False))
<class 'diofant.core.function.Derivative'>
>>> type(diff(sin(x), (x, 0)))
sin
>>> type(diff(sin(x), (x, 0), evaluate=False))
sin
>>> diff(sin(x))
cos(x)
>>> diff(sin(x*y))
Traceback (most recent call last):
...
ValueError: specify differentiation variables to differentiate sin(x*y)
Note that ``diff(sin(x))`` syntax is meant only for convenience
in interactive sessions and should be avoided in library code.
References
==========
* https://reference.wolfram.com/legacy/v5_2/Built-inFunctions/AlgebraicComputation/Calculus/D.html
See Also
========
Derivative
diofant.geometry.util.idiff: computes the derivative implicitly
"""
kwargs.setdefault('evaluate', True)
return Derivative(f, *args, **kwargs)
def expand(e, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
r"""Expand an expression using methods given as hints.
Hints evaluated unless explicitly set to False are: ``basic``, ``log``,
``multinomial``, ``mul``, ``power_base``, and ``power_exp``. The following
hints are supported but not applied unless set to True: ``complex``,
``func``, and ``trig``. In addition, the following meta-hints are
supported by some or all of the other hints: ``frac``, ``numer``,
``denom``, ``modulus``, and ``force``. ``deep`` is supported by all
hints. Additionally, subclasses of Expr may define their own hints or
meta-hints.
Parameters
==========
basic : boolean, optional
This hint is used for any special
rewriting of an object that should be done automatically (along with
the other hints like ``mul``) when expand is called. This is a catch-all
hint to handle any sort of expansion that may not be described by
the existing hint names.
deep : boolean, optional
If ``deep`` is set to ``True`` (the default), things like arguments of
functions are recursively expanded. Use ``deep=False`` to only expand on
the top level.
mul : boolean, optional
        Distributes multiplication over addition:
>>> (y*(x + z)).expand(mul=True)
x*y + y*z
multinomial : boolean, optional
Expand (x + y + ...)**n where n is a positive integer.
>>> ((x + y + z)**2).expand(multinomial=True)
x**2 + 2*x*y + 2*x*z + y**2 + 2*y*z + z**2
power_exp : boolean, optional
Expand addition in exponents into multiplied bases.
>>> exp(x + y).expand(power_exp=True)
E**x*E**y
>>> (2**(x + y)).expand(power_exp=True)
2**x*2**y
power_base : boolean, optional
Split powers of multiplied bases.
This only happens by default if assumptions allow, or if the
``force`` meta-hint is used:
>>> ((x*y)**z).expand(power_base=True)
(x*y)**z
>>> ((x*y)**z).expand(power_base=True, force=True)
x**z*y**z
>>> ((2*y)**z).expand(power_base=True)
2**z*y**z
Note that in some cases where this expansion always holds, Diofant performs
it automatically:
>>> (x*y)**2
x**2*y**2
log : boolean, optional
Pull out power of an argument as a coefficient and split logs products
into sums of logs.
Note that these only work if the arguments of the log function have the
proper assumptions--the arguments must be positive and the exponents must
be real--or else the ``force`` hint must be True:
>>> log(x**2*y).expand(log=True)
log(x**2*y)
>>> log(x**2*y).expand(log=True, force=True)
2*log(x) + log(y)
>>> x, y = symbols('x y', positive=True)
>>> log(x**2*y).expand(log=True)
2*log(x) + log(y)
complex : boolean, optional
Split an expression into real and imaginary parts.
>>> x, y = symbols('x y')
>>> (x + y).expand(complex=True)
re(x) + re(y) + I*im(x) + I*im(y)
>>> cos(x).expand(complex=True)
-I*sin(re(x))*sinh(im(x)) + cos(re(x))*cosh(im(x))
Note that this is just a wrapper around ``as_real_imag()``. Most objects
that wish to redefine ``_eval_expand_complex()`` should consider
redefining ``as_real_imag()`` instead.
func : boolean, optional
Expand other functions.
>>> gamma(x + 1).expand(func=True)
x*gamma(x)
trig : boolean, optional
Do trigonometric expansions.
>>> cos(x + y).expand(trig=True)
-sin(x)*sin(y) + cos(x)*cos(y)
>>> sin(2*x).expand(trig=True)
2*sin(x)*cos(x)
Note that the forms of ``sin(n*x)`` and ``cos(n*x)`` in terms of ``sin(x)``
and ``cos(x)`` are not unique, due to the identity `\sin^2(x) + \cos^2(x)
= 1`. The current implementation uses the form obtained from Chebyshev
polynomials, but this may change.
force : boolean, optional
If the ``force`` hint is used, assumptions about variables will be ignored
in making the expansion.
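For instance, without assumptions a square root of a product is split
only when ``force=True``:
>>> sqrt(x*y).expand()
sqrt(x*y)
>>> sqrt(x*y).expand(force=True)
sqrt(x)*sqrt(y)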
Notes
=====
- You can shut off unwanted methods::
>>> (exp(x + y)*(x + y)).expand()
E**x*E**y*x + E**x*E**y*y
>>> (exp(x + y)*(x + y)).expand(power_exp=False)
E**(x + y)*x + E**(x + y)*y
>>> (exp(x + y)*(x + y)).expand(mul=False)
E**x*E**y*(x + y)
- Use deep=False to only expand on the top level::
>>> exp(x + exp(x + y)).expand()
E**x*E**(E**x*E**y)
>>> exp(x + exp(x + y)).expand(deep=False)
E**(E**(x + y))*E**x
- Hints are applied in an arbitrary, but consistent order (in the current
implementation, they are applied in alphabetical order, except
multinomial comes before mul, but this may change). Because of this,
some hints may prevent expansion by other hints if they are applied
first. For example, ``mul`` may distribute multiplications and prevent
``log`` and ``power_base`` from expanding them. Also, if ``mul`` is
applied before ``multinomial``, the expression might not be fully
distributed. The solution is to use the various ``expand_hint`` helper
functions or to pass ``hint=False`` to this function to finely control
which hints are applied. Here are some examples::
>>> x, y, z = symbols('x y z', positive=True)
>>> expand(log(x*(y + z)))
log(x) + log(y + z)
Here, we see that ``log`` was applied before ``mul``. To get the mul
expanded form, either of the following will work::
>>> expand_mul(log(x*(y + z)))
log(x*y + x*z)
>>> expand(log(x*(y + z)), log=False)
log(x*y + x*z)
A similar thing can happen with the ``power_base`` hint::
>>> expand((x*(y + z))**x)
(x*y + x*z)**x
To get the ``power_base`` expanded form, either of the following will
work::
>>> expand((x*(y + z))**x, mul=False)
x**x*(y + z)**x
>>> expand_power_base((x*(y + z))**x)
x**x*(y + z)**x
By default, a product involving a sum is simply distributed, even if a
denominator is present::
>>> expand((x + y)*y/x)
y + y**2/x
The parts of a rational expression can be targeted::
>>> expand((x + y)*y/x/(x + 1), frac=True)
(x*y + y**2)/(x**2 + x)
>>> expand((x + y)*y/x/(x + 1), numer=True)
(x*y + y**2)/(x*(x + 1))
>>> expand((x + y)*y/x/(x + 1), denom=True)
y*(x + y)/(x**2 + x)
- The ``modulus`` meta-hint can be used to reduce the coefficients of an
expression post-expansion::
>>> expand((3*x + 1)**2)
9*x**2 + 6*x + 1
>>> expand((3*x + 1)**2, modulus=5)
4*x**2 + x + 1
- Either ``expand()`` the function or ``.expand()`` the method can be
used. Both are equivalent::
>>> expand((x + 1)**2)
x**2 + 2*x + 1
>>> ((x + 1)**2).expand()
x**2 + 2*x + 1
- Objects can define their own expand hints by defining
``_eval_expand_hint()``. The function should take the form::
def _eval_expand_hint(self, **hints):
# Only apply the method to the top-level expression
...
See also the example below. Objects should define ``_eval_expand_hint()``
methods only if ``hint`` applies to that specific object. The generic
``_eval_expand_hint()`` method defined in Expr will handle the no-op case.
Each hint should be responsible for expanding that hint only.
Furthermore, the expansion should be applied to the top-level expression
only. ``expand()`` takes care of the recursion that happens when
``deep=True``.
You should only call ``_eval_expand_hint()`` methods directly if you are
100% sure that the object has the method, as otherwise you are liable to
get unexpected ``AttributeError``s. Note, again, that you do not need to
recursively apply the hint to args of your object: this is handled
automatically by ``expand()``. ``_eval_expand_hint()`` should
generally not be used at all outside of an ``_eval_expand_hint()`` method.
If you want to apply a specific expansion from within another method, use
the public ``expand()`` function, method, or ``expand_hint()`` functions.
In order for expand to work, objects must be rebuildable by their args,
i.e., ``obj.func(*obj.args) == obj`` must hold.
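For instance, this invariant holds for ordinary expressions:
>>> e = x + 2*y
>>> e.func(*e.args) == e
True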
Expand methods are passed ``**hints`` so that expand hints may use
'metahints'--hints that control how different expand methods are applied.
For example, the ``force=True`` hint described above that causes
``expand(log=True)`` to ignore assumptions is such a metahint. The
``deep`` meta-hint is handled exclusively by ``expand()`` and is not
passed to ``_eval_expand_hint()`` methods.
Note that expansion hints should generally be methods that perform some
kind of 'expansion'. For hints that simply rewrite an expression, use the
.rewrite() API.
Examples
========
>>> class MyClass(Expr):
... def __new__(cls, *args):
... args = sympify(args)
... return Expr.__new__(cls, *args)
...
... def _eval_expand_double(self, **hints):
... # Doubles the args of MyClass.
... # If there are more than four args, doubling is not performed,
... # unless force=True is also used (False by default).
... force = hints.pop('force', False)
... if not force and len(self.args) > 4:
... return self
... return self.func(*(self.args + self.args))
...
>>> a = MyClass(1, 2, MyClass(3, 4))
>>> a
MyClass(1, 2, MyClass(3, 4))
>>> a.expand(double=True)
MyClass(1, 2, MyClass(3, 4, 3, 4), 1, 2, MyClass(3, 4, 3, 4))
>>> a.expand(double=True, deep=False)
MyClass(1, 2, MyClass(3, 4), 1, 2, MyClass(3, 4))
>>> b = MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True)
MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True, force=True)
MyClass(1, 2, 3, 4, 5, 1, 2, 3, 4, 5)
See Also
========
expand_log, expand_mul, expand_multinomial, expand_complex, expand_trig,
expand_power_base, expand_power_exp, expand_func,
diofant.simplify.hyperexpand.hyperexpand
References
==========
* https://mathworld.wolfram.com/Multiple-AngleFormulas.html
"""
# don't modify this; modify the Expr.expand method
hints['power_base'] = power_base
hints['power_exp'] = power_exp
hints['mul'] = mul
hints['log'] = log
hints['multinomial'] = multinomial
hints['basic'] = basic
return sympify(e).expand(deep=deep, modulus=modulus, **hints)
# This is a special application of two hints
def _mexpand(expr, recursive=False):
# expand multinomials and then expand products; this may not always
# be sufficient to give a fully expanded expression (see
# test_sympyissue_8247_8354 in test_arit)
was = None
while was != expr:
was, expr = expr, expand_mul(expand_multinomial(expr))
if not recursive:
break
return expr
# These are simple wrappers around single hints.
def expand_mul(expr, deep=True):
"""
Wrapper around expand that only uses the mul hint. See the expand
docstring for more information.
Examples
========
>>> x, y = symbols('x y', positive=True)
>>> expand_mul(exp(x+y)*(x+y)*log(x*y**2))
E**(x + y)*x*log(x*y**2) + E**(x + y)*y*log(x*y**2)
"""
return sympify(expr).expand(deep=deep, mul=True, power_exp=False,
power_base=False, basic=False, multinomial=False, log=False)
def expand_multinomial(expr, deep=True):
"""
Wrapper around expand that only uses the multinomial hint. See the expand
docstring for more information.
Examples
========
>>> x, y = symbols('x y', positive=True)
>>> expand_multinomial((x + exp(x + 1))**2)
2*E**(x + 1)*x + E**(2*x + 2) + x**2
"""
return sympify(expr).expand(deep=deep, mul=False, power_exp=False,
power_base=False, basic=False, multinomial=True, log=False)
def expand_log(expr, deep=True, force=False):
"""
Wrapper around expand that only uses the log hint. See the expand
docstring for more information.
Examples
========
>>> x, y = symbols('x y', positive=True)
>>> expand_log(exp(x+y)*(x+y)*log(x*y**2))
E**(x + y)*(x + y)*(log(x) + 2*log(y))
"""
return sympify(expr).expand(deep=deep, log=True, mul=False,
power_exp=False, power_base=False, multinomial=False,
basic=False, force=force)
def expand_func(expr, deep=True):
"""
Wrapper around expand that only uses the func hint. See the expand
docstring for more information.
Examples
========
>>> expand_func(gamma(x + 2))
x*(x + 1)*gamma(x)
"""
return sympify(expr).expand(deep=deep, func=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_trig(expr, deep=True):
"""
Wrapper around expand that only uses the trig hint. See the expand
docstring for more information.
Examples
========
>>> expand_trig(sin(x+y)*(x+y))
(x + y)*(sin(x)*cos(y) + sin(y)*cos(x))
"""
return sympify(expr).expand(deep=deep, trig=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_complex(expr, deep=True):
"""
Wrapper around expand that only uses the complex hint. See the expand
docstring for more information.
Examples
========
>>> expand_complex(exp(z))
E**re(z)*I*sin(im(z)) + E**re(z)*cos(im(z))
>>> expand_complex(sqrt(I))
sqrt(2)/2 + sqrt(2)*I/2
See Also
========
diofant.core.expr.Expr.as_real_imag
"""
return sympify(expr).expand(deep=deep, complex=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_power_base(expr, deep=True, force=False):
"""
Wrapper around expand that only uses the power_base hint.
A wrapper to expand(power_base=True) which separates a power with a base
that is a Mul into a product of powers, without performing any other
expansions, provided that assumptions about the power's base and exponent
allow.
With deep=False (the default is True), only the top-level expression is expanded.
force=True (default is False) will cause the expansion to ignore
assumptions about the base and exponent. When False, the expansion will
only happen if the base is non-negative or the exponent is an integer.
>>> (x*y)**2
x**2*y**2
>>> (2*x)**y
(2*x)**y
>>> expand_power_base(_)
2**y*x**y
>>> expand_power_base((x*y)**z)
(x*y)**z
>>> expand_power_base((x*y)**z, force=True)
x**z*y**z
>>> expand_power_base(sin((x*y)**z), deep=False)
sin((x*y)**z)
>>> expand_power_base(sin((x*y)**z), force=True)
sin(x**z*y**z)
>>> expand_power_base((2*sin(x))**y + (2*cos(x))**y)
2**y*sin(x)**y + 2**y*cos(x)**y
>>> expand_power_base((2*exp(y))**x)
2**x*(E**y)**x
>>> expand_power_base((2*cos(x))**y)
2**y*cos(x)**y
Notice that sums are left untouched. If this is not the desired behavior,
apply full ``expand()`` to the expression:
>>> expand_power_base(((x+y)*z)**2)
z**2*(x + y)**2
>>> (((x+y)*z)**2).expand()
x**2*z**2 + 2*x*y*z**2 + y**2*z**2
>>> expand_power_base((2*y)**(1+z))
2**(z + 1)*y**(z + 1)
>>> ((2*y)**(1+z)).expand()
2*2**z*y*y**z
See Also
========
expand
"""
return sympify(expr).expand(deep=deep, log=False, mul=False,
power_exp=False, power_base=True, multinomial=False,
basic=False, force=force)
def expand_power_exp(expr, deep=True):
"""
Wrapper around expand that only uses the power_exp hint.
Examples
========
>>> expand_power_exp(x**(y + 2))
x**2*x**y
See Also
========
expand
"""
return sympify(expr).expand(deep=deep, complex=False, basic=False,
log=False, mul=False, power_exp=True, power_base=False, multinomial=False)
def count_ops(expr, visual=False):
"""
Return a representation (integer or expression) of the operations in expr.
If ``visual`` is ``False`` (default) then the sum of the coefficients of the
visual expression will be returned.
If ``visual`` is ``True`` then the number of each type of operation is shown
with the core class types (or their virtual equivalent) multiplied by the
number of times they occur.
If expr is an iterable, the sum of the op counts of the
items will be returned.
Examples
========
Although there isn't a SUB object, minus signs are interpreted as
either negations or subtractions:
>>> (x - y).count_ops(visual=True)
SUB
>>> (-x).count_ops(visual=True)
NEG
Here, there are two Adds and a Pow:
>>> (1 + a + b**2).count_ops(visual=True)
2*ADD + POW
In the following, an Add, Mul, Pow and two functions:
>>> (sin(x)*x + sin(x)**2).count_ops(visual=True)
ADD + MUL + POW + 2*SIN
for a total of 5:
>>> (sin(x)*x + sin(x)**2).count_ops(visual=False)
5
Note that "what you type" is not always what you get. The expression
1/x/y is translated by Diofant into 1/(x*y), so it gives a DIV and MUL rather
than two DIVs:
>>> (1/x/y).count_ops(visual=True)
DIV + MUL
The visual option can be used to demonstrate the difference in
operations for expressions in different forms. Here, the Horner
representation is compared with the expanded form of a polynomial:
>>> eq = x*(1 + x*(2 + x*(3 + x)))
>>> count_ops(eq.expand(), visual=True) - count_ops(eq, visual=True)
-MUL + 3*POW
The count_ops function also handles iterables:
>>> count_ops([x, sin(x), None, True, x + 2], visual=False)
2
>>> count_ops([x, sin(x), None, True, x + 2], visual=True)
ADD + SIN
>>> count_ops({x: sin(x), x + 2: y + 1}, visual=True)
2*ADD + SIN
"""
from ..integrals import Integral
from ..logic.boolalg import BooleanFunction
from ..simplify.radsimp import fraction
from .symbol import Symbol
expr = sympify(expr)
if type(expr) is dict:
ops = [count_ops(k, visual=visual) +
count_ops(v, visual=visual) for k, v in expr.items()]
elif iterable(expr):
ops = [count_ops(i, visual=visual) for i in expr]
elif isinstance(expr, Expr):
ops = []
args = [expr]
NEG = Symbol('NEG')
DIV = Symbol('DIV')
SUB = Symbol('SUB')
ADD = Symbol('ADD')
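# Placeholder symbols for each operation type; their coefficients in the
# visual result count how many times each operation occurs.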
while args:
a = args.pop()
if a.is_Rational:
# -1/3 = NEG + DIV
if a != 1:
if a.numerator < 0:
ops.append(NEG)
if a.denominator != 1:
ops.append(DIV)
# XXX "peephole" optimization, http://bugs.python.org/issue2506
a
continue
elif a.is_Mul:
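# A leading minus sign counts as NEG; a nontrivial denominator counts as DIV.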
if _coeff_isneg(a):
ops.append(NEG)
if a.args[0] == -1:
a = a.as_two_terms()[1]
else:
a = -a
n, d = fraction(a)
if n.is_Integer:
ops.append(DIV)
args.append(d)
continue # won't be -Mul but could be Add
elif d != 1:
if not d.is_Integer:
args.append(d)
ops.append(DIV)
args.append(n)
continue # could be -Mul
elif a.is_Add:
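# '+' between terms counts as ADD and binary '-' as SUB; a fully negated
# sum gets an extra NEG (-x - y = NEG + SUB).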
aargs = list(a.args)
negs = 0
for i, ai in enumerate(aargs):
if _coeff_isneg(ai):
negs += 1
args.append(-ai)
if i > 0:
ops.append(SUB)
else:
args.append(ai)
if i > 0:
ops.append(ADD)
if negs == len(aargs): # -x - y = NEG + SUB
ops.append(NEG)
elif _coeff_isneg(aargs[0]): # -x + y = SUB, but already recorded ADD
ops.append(SUB - ADD)
# XXX "peephole" optimization, http://bugs.python.org/issue2506
a
continue
elif isinstance(expr, BooleanFunction):
ops = []
for arg in expr.args:
ops.append(count_ops(arg, visual=True))
o = Symbol(expr.func.__name__.upper())
ops.append(o)
continue
if a.is_Pow and a.exp == -1:
ops.append(DIV)
args.append(a.base) # won't be -Mul but could be Add
continue
if (a.is_Mul or
a.is_Pow or
a.is_Function or
isinstance(a, Derivative) or
isinstance(a, Integral)):
o = Symbol(a.func.__name__.upper())
# count the args
if (a.is_Mul or isinstance(a, LatticeOp)):
ops.append(o*(len(a.args) - 1))
else:
ops.append(o)
if not a.is_Symbol:
args.extend(a.args)
elif not isinstance(expr, Basic):
ops = []
else:
ops = []
args = [expr]
while args:
a = args.pop()
if a.args:
o = Symbol(a.func.__name__.upper())
ops.append(o)
args.extend(a.args)
if not ops:
if visual:
return Integer(0)
return 0
ops = Add(*ops)
if visual:
return ops
if ops.is_Number:
return int(ops)
return sum(int((a.args or [1])[0]) for a in Add.make_args(ops))
def nfloat(expr, n=15, exponent=False):
"""Make all Rationals in expr Floats except those in exponents
(unless the ``exponent`` flag is set to True).
Examples
========
>>> nfloat(x**4 + x/2 + cos(pi/3) + 1 + sqrt(y))
x**4 + 0.5*x + sqrt(y) + 1.5
>>> nfloat(x**4 + sqrt(y), exponent=True)
x**4.0 + y**0.5
"""
from ..polys.rootoftools import RootOf
from .power import Pow
from .symbol import Dummy
if iterable(expr, exclude=(str,)):
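# Map nfloat over containers, preserving dict/Dict key-value pairs.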
if isinstance(expr, (dict, Dict)):
return type(expr)([(k, nfloat(v, n, exponent)) for k, v in
list(expr.items())])
return type(expr)([nfloat(a, n, exponent) for a in expr])
rv = sympify(expr)
if rv.is_Number:
return Float(rv, n)
elif rv.is_number:
# evalf doesn't always set the precision
rv = rv.evalf(n)
if rv.is_Number:
rv = Float(rv, n)
else:
pass # pure_complex(rv) is likely True
return rv
# watch out for RootOf instances that don't like to have
# their exponents replaced with Dummies and also sometimes have
# problems with evaluating at low precision (issue sympy/sympy#6393)
rv = rv.xreplace({ro: ro.evalf(n) for ro in rv.atoms(RootOf)})
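# Temporarily hide exponents behind Dummy symbols so that evalf only floats
# the bases; the original exponents are restored afterwards.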
if not exponent:
reps = [(p, Pow(p.base, Dummy())) for p in rv.atoms(Pow)]
rv = rv.xreplace(dict(reps))
rv = rv.evalf(n, strict=False)
if not exponent:
rv = rv.xreplace({d.exp: p.exp for p, d in reps})
else:
# Pow._eval_evalf special cases Integer exponents so if
# exponent is supposed to be handled we have to do so here
rv = rv.xreplace(Transform(
lambda x: Pow(x.base, Float(x.exp, n)),
lambda x: x.is_Pow and x.exp.is_Integer))
return rv.xreplace(Transform(
lambda x: x.func(*nfloat(x.args, n, exponent)),
lambda x: isinstance(x, Function)))