a_l = [0, 1, 2]
b_l = [10, 20, 30]
a_t = (0, 1, 2)
b_t = (10, 20, 30)
a_s = 'abc'
b_s = 'xyz'
print(a_l + b_l)
# [0, 1, 2, 10, 20, 30]
print(a_t + b_t)
# (0, 1, 2, 10, 20, 30)
print(a_s + b_s)
# abcxyz
# print(a_l + 3)
# TypeError: can only concatenate list (not "int") to list
print(a_l + [3])
# [0, 1, 2, 3]
# print(a_t + (3))
# TypeError: can only concatenate tuple (not "int") to tuple
print(a_t + (3, ))
# (0, 1, 2, 3)
a_l += b_l
print(a_l)
# [0, 1, 2, 10, 20, 30]
a_t += b_t
print(a_t)
# (0, 1, 2, 10, 20, 30)
a_s += b_s
print(a_s)
# abcxyz
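# Note: += mutates a list in place but rebinds tuples and strings, which
# are immutable (shown below with a second reference to each object).
a = [0]
a_alias = a
a += [1]
print(a_alias)
# [0, 1]
t = (0,)
t_alias = t
t += (1,)
print(t_alias)
# (0,)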
print(b_l * 3)
# [10, 20, 30, 10, 20, 30, 10, 20, 30]
print(3 * b_l)
# [10, 20, 30, 10, 20, 30, 10, 20, 30]
print(b_t * 3)
# (10, 20, 30, 10, 20, 30, 10, 20, 30)
print(3 * b_t)
# (10, 20, 30, 10, 20, 30, 10, 20, 30)
print(b_s * 3)
# xyzxyzxyz
print(3 * b_s)
# xyzxyzxyz
# print(b_l * 0.5)
# TypeError: can't multiply sequence by non-int of type 'float'
print(b_l * -1)
# []
b_l *= 3
print(b_l)
# [10, 20, 30, 10, 20, 30, 10, 20, 30]
b_t *= 3
print(b_t)
# (10, 20, 30, 10, 20, 30, 10, 20, 30)
b_s *= 3
print(b_s)
# xyzxyzxyz
a_l = [0, 1, 2]
b_l = [10, 20, 30]
c_l = a_l + b_l * 3
print(c_l)
# [0, 1, 2, 10, 20, 30, 10, 20, 30, 10, 20, 30]
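# Note: * binds tighter than +, so b_l * 3 above is evaluated before the
# concatenation with a_l.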
| nkmk/python-snippets | notebook/arithmetic_operator_list_tuple_str.py | Python | mit | 1,210 |
"""Interface for stack manifest YAML files written by buildlsstsw.sh
The purpose of these YAML files is to isolate ltd-mason from Eups/Scons
builds of the software itself and to merely tell ltd-mason where the built
software can be found, and metadata about the versioning of this Stack.
"""
import abc
from urllib.parse import urlparse, urlunparse
import io
import os
from pathlib import Path
import jsonschema
import ruamel.yaml
class BaseManifest(metaclass=abc.ABCMeta):
    """Abstract base class defining the API for a Manifest.
    Manifests specify the input parameters for a documentation build.
    """
@abc.abstractproperty
def doc_repo_url(self):
"""Git URL for the product's Git documentation repository."""
return
@abc.abstractproperty
def doc_repo_ref(self):
"""Git ref (branch, commit, tag) for the product's Git documentation
repository (:class:`str`).
"""
return
@property
def doc_repo_name(self):
"""Name of the product's Git documentation repository (:class:`str`).
For example, a doc repository at
``'https://github.com/lsst-sqre/pipelines_doc.git'`` is named
``'pipelines_doc'``.
"""
parts = urlparse(self.doc_repo_url)
return os.path.splitext(parts.path)[0].split('/')[-1]
@abc.abstractproperty
def product_name(self):
"""Name of the documentation product for LTD Keeper."""
return
@abc.abstractproperty
def build_id(self):
"""Build identifier (`str`)."""
return
@abc.abstractproperty
def requester_github_handle(self):
"""GitHub username handle of person who triggered the build. `None`
if not available.
"""
return
@abc.abstractproperty
    def refs(self):
        """`list` of Git refs that define the overall version set of the
products.
"""
return
@abc.abstractproperty
def packages(self):
"""Dictionary of package names as keys and package data as values.
Package data is a dict with keys:
        - ``'dir'``: directory where the package was installed by lsstsw. This
          is ensured to be an absolute path, transforming any relative paths
          in the Manifest, assuming they are relative to the **current
          working directory.**
- ``'url'``: Git repository URL.
- ``'ref'``: Git reference for package (branch, commit, tag).
"""
return
class Manifest(BaseManifest):
"""Representation of a YAML-encoded manifest for an LSST stack product.
Parameters
----------
    f : file object or str
        Stack manifest, encoded as YAML text.
    Attributes
    ----------
    data : `ruamel.yaml` object
        Manifest dataset loaded by :mod:`ruamel.yaml`.
"""
def __init__(self, f):
super().__init__()
yaml = ruamel.yaml.YAML()
data = yaml.load(f)
Manifest.validate(data)
self.data = data
@property
def yaml(self):
"""YAML representation of the manifest (:class:`str`)."""
        yaml = ruamel.yaml.YAML()
        stream = io.StringIO()
        yaml.dump(self.data, stream)
        return stream.getvalue()
@property
def doc_repo_url(self):
"""Git URL for the product's Git documentation repository."""
return self.data['doc_repo']['url']
@property
def doc_repo_ref(self):
"""Git ref (branch, commit, tag) for the product's Git documentation
repository (:class:`str`).
"""
return self.data['doc_repo']['ref']
@property
def product_name(self):
"""Name of the documentation product."""
return self.data['product_name']
@property
def build_id(self):
"""Build identifier (`str`)."""
return self.data['build_id']
@property
def requester_github_handle(self):
"""GitHub username handle of person who triggered the build. `None`
if not available.
"""
if 'requester_github_handle' in self.data:
return self.data['requester_github_handle']
else:
return None
@property
    def refs(self):
        """`list` of Git refs that define the overall version set of the
products.
"""
return self.data['refs']
@property
def packages(self):
"""Dictionary of package names as keys and package data as values.
Package data is a dict with keys:
        - ``'dir'``: directory where the package was installed by lsstsw. This
          is ensured to be an absolute path, transforming any relative paths
          in the Manifest, assuming they are relative to the **current
          working directory.**
- ``'url'``: Git repository URL.
- ``'ref'``: Git reference for package (branch, commit, tag).
"""
data = {}
for pkg_name, pkg_data in self.data['packages'].items():
pkg_data = dict(pkg_data)
pkg_data['dir'] = os.path.abspath(pkg_data['dir'])
data[pkg_name] = pkg_data
return data
@classmethod
    def validate(cls, data):
"""Validate the schema of a parsed YAML manifest."""
schema = load_manifest_schema()
jsonschema.validate(data, schema)
def load_manifest_schema():
path = Path(__file__).parent / '../manifest_schema.yaml'
yaml_data = path.read_text()
yaml = ruamel.yaml.YAML()
return yaml.load(yaml_data)
class TravisManifest(BaseManifest):
"""Manifest for Travis CI based single doc repo builds.
Unlike the original :class:`Manifest` that was driven by YAML, the
:class:`TravisManifest` is driven by environment variables available in
a Travis CI environment.
"""
def __init__(self):
        super().__init__()
@property
def doc_repo_url(self):
"""Git URL for the product's Git documentation repository derived
from ``$TRAVIS_REPO_SLUG`` and assumes the repo is hosted on GitHub.
"""
slug = os.getenv('TRAVIS_REPO_SLUG')
if slug is None:
raise RuntimeError('Environment variable TRAVIS_REPO_SLUG not set')
parts = ('https', 'github.com', slug + '.git', '', '', '')
url = urlunparse(parts)
return url
@property
def doc_repo_ref(self):
"""Git ref (branch name) for the product's Git documentation
repository (:class:`str`) derived from ``$TRAVIS_BRANCH``.
"""
branch = os.getenv('TRAVIS_BRANCH')
if branch is None:
raise RuntimeError('Environment variable TRAVIS_BRANCH not set')
return branch
@property
def product_name(self):
"""Name of the documentation product for LTD Keeper derived from
LTD_MASON_PRODUCT environment variable.
"""
name = os.getenv('LTD_MASON_PRODUCT')
if name is None:
message = 'Environment variable LTD_MASON_PRODUCT not set'
raise RuntimeError(message)
return name
@property
def build_id(self):
"""Build ID is set to `None` to allow LTD Keeper to set an ID."""
return None
@property
def requester_github_handle(self):
"""The GitHub user triggering a build: this is not available on Travis.
Set to `None`.
"""
return None
@property
    def refs(self):
        """`list` of Git refs that define the overall version set of the
        products. On Travis this is a one-item list with the branch name.
"""
return [self.doc_repo_ref]
@property
    def packages(self):
        """Not applicable for Travis builds. Returns an empty dict."""
return dict()
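# Usage sketch (hypothetical paths; the YAML file must match the schema
# loaded by load_manifest_schema above):
#
#     with open('manifest.yaml') as f:
#         manifest = Manifest(f)
#     print(manifest.product_name, manifest.build_id)
#     for name, pkg in manifest.packages.items():
#         print(name, pkg['dir'], pkg['ref'])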
| lsst-sqre/ltd-mason | ltdmason/manifest.py | Python | mit | 7,686 |
"""The tests for the Roku remote platform."""
from unittest.mock import patch
from homeassistant.components.remote import (
ATTR_COMMAND,
DOMAIN as REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
)
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.typing import HomeAssistantType
from tests.components.roku import UPNP_SERIAL, setup_integration
from tests.test_util.aiohttp import AiohttpClientMocker
MAIN_ENTITY_ID = f"{REMOTE_DOMAIN}.my_roku_3"
# pylint: disable=redefined-outer-name
async def test_setup(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test setup with basic config."""
await setup_integration(hass, aioclient_mock)
assert hass.states.get(MAIN_ENTITY_ID)
async def test_unique_id(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test unique id."""
await setup_integration(hass, aioclient_mock)
entity_registry = er.async_get(hass)
main = entity_registry.async_get(MAIN_ENTITY_ID)
assert main.unique_id == UPNP_SERIAL
async def test_main_services(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test platform services."""
await setup_integration(hass, aioclient_mock)
with patch("homeassistant.components.roku.Roku.remote") as remote_mock:
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: MAIN_ENTITY_ID},
blocking=True,
)
remote_mock.assert_called_once_with("poweroff")
with patch("homeassistant.components.roku.Roku.remote") as remote_mock:
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: MAIN_ENTITY_ID},
blocking=True,
)
remote_mock.assert_called_once_with("poweron")
with patch("homeassistant.components.roku.Roku.remote") as remote_mock:
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
{ATTR_ENTITY_ID: MAIN_ENTITY_ID, ATTR_COMMAND: ["home"]},
blocking=True,
)
remote_mock.assert_called_once_with("home")
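async def test_send_multiple_commands(
    hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
    """Sketch of an additional check, assuming the platform invokes the
    remote once per entry in ATTR_COMMAND (as the single-command test
    above suggests); adjust if the integration batches commands."""
    await setup_integration(hass, aioclient_mock)
    with patch("homeassistant.components.roku.Roku.remote") as remote_mock:
        await hass.services.async_call(
            REMOTE_DOMAIN,
            SERVICE_SEND_COMMAND,
            {ATTR_ENTITY_ID: MAIN_ENTITY_ID, ATTR_COMMAND: ["home", "up"]},
            blocking=True,
        )
    assert remote_mock.call_count == 2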
| w1ll1am23/home-assistant | tests/components/roku/test_remote.py | Python | apache-2.0 | 2,309 |
from core.models import *
from django.contrib.auth import authenticate, login
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import render, redirect, get_object_or_404
#import redis
#r = redis.StrictRedis(host='localhost', port=6379, db=0)
def home(request):
return render(request, 'index.html', locals())
def register(request):
form = UserCreationForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
new_user = form.save()
            try:
                # 'password2' is UserCreationForm's confirmation field.
                new_user = authenticate(username=new_user.username,
                                        password=request.POST['password2'])
                if new_user is not None:
                    login(request, new_user)
            except Exception:
                # Auto-login after registration is best-effort.
                pass
return redirect('home')
return render(request, 'register.html', locals()) | mburst/gevent-socketio-starterkit | djangoproject/core/views.py | Python | mit | 855 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from unittest.mock import MagicMock, Mock
import pytest
from pytest import fixture
from airflow.exceptions import AirflowException
from airflow.models.connection import Connection
from airflow.providers.microsoft.azure.hooks.azure_data_factory import (
AzureDataFactoryHook,
provide_targeted_factory,
)
from airflow.utils import db
DEFAULT_RESOURCE_GROUP = "defaultResourceGroup"
RESOURCE_GROUP = "testResourceGroup"
DEFAULT_FACTORY = "defaultFactory"
FACTORY = "testFactory"
MODEL = object()
NAME = "testName"
ID = "testId"
def setup_module():
connection = Connection(
conn_id="azure_data_factory_test",
conn_type="azure_data_factory",
login="clientId",
password="clientSecret",
extra=json.dumps(
{
"tenantId": "tenantId",
"subscriptionId": "subscriptionId",
"resourceGroup": DEFAULT_RESOURCE_GROUP,
"factory": DEFAULT_FACTORY,
}
),
)
db.merge_conn(connection)
@fixture
def hook():
client = AzureDataFactoryHook(conn_id="azure_data_factory_test")
client._conn = MagicMock(
spec=[
"factories",
"linked_services",
"datasets",
"pipelines",
"pipeline_runs",
"triggers",
"trigger_runs",
]
)
return client
def parametrize(explicit_factory, implicit_factory):
def wrapper(func):
return pytest.mark.parametrize(
("user_args", "sdk_args"),
(explicit_factory, implicit_factory),
ids=("explicit factory", "implicit factory"),
)(func)
return wrapper
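# Every test below runs twice via the helper above: once passing the
# resource group and factory explicitly, and once relying on the defaults
# stored in the connection's extras (ids "explicit factory" and
# "implicit factory").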
def test_provide_targeted_factory():
def echo(_, resource_group_name=None, factory_name=None):
return resource_group_name, factory_name
conn = MagicMock()
hook = MagicMock()
hook.get_connection.return_value = conn
conn.extra_dejson = {}
assert provide_targeted_factory(echo)(hook, RESOURCE_GROUP, FACTORY) == (RESOURCE_GROUP, FACTORY)
conn.extra_dejson = {"resourceGroup": DEFAULT_RESOURCE_GROUP, "factory": DEFAULT_FACTORY}
assert provide_targeted_factory(echo)(hook) == (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)
with pytest.raises(AirflowException):
conn.extra_dejson = {}
provide_targeted_factory(echo)(hook)
@parametrize(
explicit_factory=((RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY)),
implicit_factory=((), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)),
)
def test_get_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_factory(*user_args)
hook._conn.factories.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, MODEL)),
implicit_factory=((MODEL,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, MODEL)),
)
def test_create_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_factory(*user_args)
hook._conn.factories.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, MODEL)),
implicit_factory=((MODEL,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, MODEL)),
)
def test_update_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._factory_exists = Mock(return_value=True)
hook.update_factory(*user_args)
hook._conn.factories.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, MODEL)),
implicit_factory=((MODEL,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, MODEL)),
)
def test_update_factory_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._factory_exists = Mock(return_value=False)
with pytest.raises(AirflowException, match=r"Factory .+ does not exist"):
hook.update_factory(*user_args)
@parametrize(
explicit_factory=((RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY)),
implicit_factory=((), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)),
)
def test_delete_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_factory(*user_args)
hook._conn.factories.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_linked_service(*user_args)
hook._conn.linked_services.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_linked_service(*user_args)
    hook._conn.linked_services.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._linked_service_exists = Mock(return_value=True)
hook.update_linked_service(*user_args)
    hook._conn.linked_services.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_linked_service_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._linked_service_exists = Mock(return_value=False)
with pytest.raises(AirflowException, match=r"Linked service .+ does not exist"):
hook.update_linked_service(*user_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_linked_service(*user_args)
hook._conn.linked_services.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_dataset(*user_args)
hook._conn.datasets.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_dataset(*user_args)
hook._conn.datasets.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._dataset_exists = Mock(return_value=True)
hook.update_dataset(*user_args)
hook._conn.datasets.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_dataset_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._dataset_exists = Mock(return_value=False)
with pytest.raises(AirflowException, match=r"Dataset .+ does not exist"):
hook.update_dataset(*user_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_dataset(*user_args)
hook._conn.datasets.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_pipeline(*user_args)
hook._conn.pipelines.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_pipeline(*user_args)
hook._conn.pipelines.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._pipeline_exists = Mock(return_value=True)
hook.update_pipeline(*user_args)
hook._conn.pipelines.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_pipeline_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._pipeline_exists = Mock(return_value=False)
with pytest.raises(AirflowException, match=r"Pipeline .+ does not exist"):
hook.update_pipeline(*user_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_pipeline(*user_args)
hook._conn.pipelines.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_run_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.run_pipeline(*user_args)
hook._conn.pipelines.create_run.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, ID)),
implicit_factory=((ID,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, ID)),
)
def test_get_pipeline_run(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_pipeline_run(*user_args)
hook._conn.pipeline_runs.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, ID)),
implicit_factory=((ID,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, ID)),
)
def test_cancel_pipeline_run(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.cancel_pipeline_run(*user_args)
hook._conn.pipeline_runs.cancel.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_trigger(*user_args)
hook._conn.triggers.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_trigger(*user_args)
hook._conn.triggers.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._trigger_exists = Mock(return_value=True)
hook.update_trigger(*user_args)
hook._conn.triggers.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_trigger_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._trigger_exists = Mock(return_value=False)
with pytest.raises(AirflowException, match=r"Trigger .+ does not exist"):
hook.update_trigger(*user_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_trigger(*user_args)
hook._conn.triggers.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_start_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.start_trigger(*user_args)
hook._conn.triggers.begin_start.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_stop_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.stop_trigger(*user_args)
hook._conn.triggers.begin_stop.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, ID)),
implicit_factory=((NAME, ID), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, ID)),
)
def test_rerun_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.rerun_trigger(*user_args)
hook._conn.trigger_runs.rerun.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, ID)),
implicit_factory=((NAME, ID), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, ID)),
)
def test_cancel_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.cancel_trigger(*user_args)
hook._conn.trigger_runs.cancel.assert_called_with(*sdk_args)
| dhuang/incubator-airflow | tests/providers/microsoft/azure/hooks/test_azure_data_factory.py | Python | apache-2.0 | 15,858 |
import minecraft
x = 128
y = 2
z = 128
mc = minecraft.Minecraft.create()
# Sweep the area from (128, y, 128) down to (-127, y, -127), layer by layer
# up to y = 62, replacing air (block id 0) with flowing water (block id 8).
while y < 63:
j = mc.getBlock(x,y,z)
if j == 0:
mc.setBlock(x,y,z,8)
z = z - 1
if z <= -128:
z = 128
x = x - 1
if x<= -128:
x = 128
y = y + 1
| mohsraspi/mhscs14 | jay/wowobsidian.py | Python | gpl-2.0 | 437 |
from django.http import HttpResponse
def hello_world(request):
return HttpResponse('hello world')
| MeirKriheli/Open-Knesset | simple/views.py | Python | bsd-3-clause | 184 |
"""
WSGI config for expression_data project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "expression_data.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| davebridges/expression-data-server | expression_data/expression_data/wsgi.py | Python | bsd-2-clause | 1,152 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 06 14:24:18 2017
@author: jpeacock
"""
#==============================================================================
# Imports
#==============================================================================
#import os
#import time
import numpy as np
#import mtpy.usgs.zen as zen
#import pandas as pd
import mtpy.core.ts as mtts
reload(mtts)  # Python 2 builtin; on Python 3 use importlib.reload(mtts)
#============================================================================
fn = r"c:\Users\jrpeacock\Documents\GitHub\processing\test_data\mb311_20170308_150518_256.EX"
# read header
with open(fn, 'r') as fid:
header_line = fid.readline()
header_list = header_line[1:].strip().split()
# read in data, this is extremely slow
data = np.loadtxt(fn, skiprows=1)
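# Faster alternative (sketch, assuming a single whitespace-delimited column;
# pandas' C parser is typically much quicker than np.loadtxt):
# import pandas as pd
# data = pd.read_csv(fn, skiprows=1, header=None,
#                    delim_whitespace=True).values.ravel()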
n = data.size
# new file
h5_fn = r"c:\Users\jrpeacock\test_no_index_09.h5"
#==============================================================================
# Fill ts
#==============================================================================
test_ts = mtts.MT_TS()
test_ts.ts = data
test_ts.station = header_list[0]
test_ts.component = header_list[1]
test_ts.sampling_rate = float(header_list[2])
test_ts.start_time_epoch_sec = float(header_list[3])
test_ts.n_samples = int(header_list[4])
test_ts.units = header_list[5]
test_ts.lat = float(header_list[6])
test_ts.lon = float(header_list[7])
test_ts.elev = float(header_list[8])
test_ts.coordinate_system = 'geomagnetic'
test_ts.dipole_length = 100.
test_ts.azimuth = 0
test_ts.datum = 'WGS84'
test_ts.data_logger = 'Zonge Zen'
test_ts.instrument_num = None
test_ts.calibration_fn = None
test_ts.declination = 3.6
# write file
test_ts.write_hdf5(h5_fn, compression_level=9)
test_ts.estimate_spectra(**{'nperseg':2**10})
#
#read_ts = mtts.MT_TS()
#read_ts.read_hdf5(h5_fn)
#read_ts = MT_TS()
#read_ts.read_hdf5(h5_fn)
#
#read_ts.write_ascii_file()
| simpeg/processing | tests/test_ts.py | Python | mit | 1,875 |
from collections import namedtuple
import pytest
from app.utils import get_errors_for_csv
MockRecipients = namedtuple(
'RecipientCSV',
[
'rows_with_bad_recipients',
'rows_with_missing_data'
]
)
@pytest.mark.parametrize(
"rows_with_bad_recipients,rows_with_missing_data,template_type,expected_errors",
[
(
[], [],
'sms',
[]
),
(
{2}, [],
'sms',
['fix 1 phone number']
),
(
{2, 4, 6}, [],
'sms',
['fix 3 phone numbers']
),
(
{1}, [],
'email',
['fix 1 email address']
),
(
{2, 4, 6}, [],
'email',
['fix 3 email addresses']
),
(
{2}, {3},
'sms',
[
'fix 1 phone number',
'enter missing data in 1 row'
]
),
(
{2, 4, 6, 8}, {3, 6, 9, 12},
'sms',
[
'fix 4 phone numbers',
'enter missing data in 4 rows'
]
)
]
)
def test_get_errors_for_csv(
rows_with_bad_recipients, rows_with_missing_data,
template_type,
expected_errors
):
assert get_errors_for_csv(
MockRecipients(rows_with_bad_recipients, rows_with_missing_data),
template_type
) == expected_errors
| gov-cjwaszczuk/notifications-admin | tests/app/main/test_errors_for_csv.py | Python | mit | 1,478 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import sys
from setuptools import setup
name = 'drf-proxy-pagination'
package = 'proxy_pagination'
description = 'Pagination class for Django REST Framework to choose pagination class by query parameter'
url = 'https://github.com/tuffnatty/drf-proxy-pagination'
author = 'Phil Krylov'
author_email = '[email protected]'
license = 'MIT'
def get_version(package):
"""
    Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("^__version__ = ['\"]([^'\"]+)['\"]",
init_py, re.MULTILINE).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
version = get_version(package)
if sys.argv[-1] == 'publish':
if os.system("pip freeze | grep wheel"):
print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
sys.exit()
os.system("python setup.py sdist upload")
os.system("python setup.py bdist_wheel upload")
print("You probably want to also tag the version now:")
print(" git tag -a {0} -m 'version {0}'".format(version))
print(" git push --tags")
sys.exit()
setup(
name=name,
version=version,
url=url,
license=license,
description=description,
author=author,
author_email=author_email,
packages=get_packages(package),
package_data=get_package_data(package),
install_requires=[],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet :: WWW/HTTP',
]
)
| tuffnatty/drf-proxy-pagination | setup.py | Python | mit | 2,908 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# File name: a.py
#
# Creation date: 09-04-2015
#
# Created by: Pavlos Parissis <[email protected]>
#
import re
def main():
    # Matches manifest lines like "12. metric_name [FBS...]:" (cf. the grep
    # pattern [[:digit:]]{1,}\. [0-9A-Za-z_]{1,} \[.*\]:).
frontend = []
backend = []
server = []
with open('/home/pparissis/configuration.txt') as file:
for line in file:
line = line.strip()
match = re.search(r'\d+\. (\w+) (\[.*\]:) .*', line)
if match:
if 'F' in match.group(2):
frontend.append(match.group(1))
if 'B' in match.group(2):
backend.append(match.group(1))
if 'S' in match.group(2):
server.append(match.group(1))
print("FRONTEND_METRICS = [")
for m in frontend:
print("{:<4}'{}',".format('', m))
print("]")
print("POOL_METRICS = [")
for m in backend:
print("{:<4}'{}',".format('', m))
print("]")
print("SERVER_METRICS = [")
for m in server:
print("{:<4}'{}',".format('', m))
print("]")
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
| unixsurfer/haproxyadmin | tools/generate_constants_for_metrics.py | Python | apache-2.0 | 1,229 |
#!/usr/bin/env python
#
# Copyright 2009 Facebook
# Modifications copyright 2016 Meteotest
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import sys
import threading
import time
import traceback
import math
import select
from hurray.server import stack_context
from hurray.server.concurrent import TracebackFuture, is_future
from hurray.server.log import app_log, gen_log
from hurray.server.platform.auto import set_close_exec, Waker
from hurray.server.util import PY3, Configurable, errno_from_exception, timedelta_to_seconds
try:
import signal
except ImportError:
signal = None
if PY3:
import _thread as thread
else:
import thread
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
pass
class IOLoop(Configurable):
"""A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
are available, or else we fall back on select(). If you are
implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports
either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server:
.. testcode::
import errno
import functools
import hurray.server.ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = hurray.server.ioloop.IOLoop.current()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
.. testoutput::
:hide:
By default, a newly-constructed `IOLoop` becomes the thread's current
`IOLoop`, unless there already is a current `IOLoop`. This behavior
can be controlled with the ``make_current`` argument to the `IOLoop`
constructor: if ``make_current=True``, the new `IOLoop` will always
try to become current and it raises an error if there is already a
current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
_current = threading.local()
@staticmethod
def instance():
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. In most other cases, it is better to use `current()`
to get the current thread's `IOLoop`.
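        A typical use is handing work from another thread to the main
        thread's loop (sketch)::

            IOLoop.instance().add_callback(callback)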
"""
if not hasattr(IOLoop, "_instance"):
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
@staticmethod
def initialized():
"""Returns true if the singleton instance has been created."""
return hasattr(IOLoop, "_instance")
def install(self):
"""Installs this `IOLoop` object as the singleton instance.
This is normally not necessary as `instance()` will create
an `IOLoop` on demand, but you may want to call `install` to use
a custom subclass of `IOLoop`.
When using an `IOLoop` subclass, `install` must be called prior
to creating any objects that implicitly create their own
`IOLoop`.
"""
assert not IOLoop.initialized()
IOLoop._instance = self
@staticmethod
def clear_instance():
"""Clear the global `IOLoop` instance.
.. versionadded:: 4.0
"""
if hasattr(IOLoop, "_instance"):
del IOLoop._instance
@staticmethod
def current(instance=True):
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop`, returns `IOLoop.instance()` (i.e. the
main thread's `IOLoop`, creating one if necessary) if ``instance``
is true.
In general you should use `IOLoop.current` as the default when
constructing an asynchronous object, and use `IOLoop.instance`
when you mean to communicate to the main thread from a different
one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
"""
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
return IOLoop.instance()
return current
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
.. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop`
will automatically become current.
"""
IOLoop._current.instance = self
@staticmethod
def clear_current():
IOLoop._current.instance = None
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from hurray.server.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from hurray.server.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from hurray.server.platform.select import SelectIOLoop
return SelectIOLoop
def initialize(self, make_current=None):
if make_current is None:
if IOLoop.current(instance=False) is None:
self.make_current()
elif make_current:
if IOLoop.current(instance=False) is not None:
raise RuntimeError("current IOLoop already exists")
self.make_current()
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
``close`` method when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` method (and optionally a
``close()`` method, which may be called when the `IOLoop` is shut
down).
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
    def update_handler(self, fd, events):
        """Changes the events we listen for on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
        ``seconds`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
        ``seconds`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('hurray').handlers,
logging.getLogger('hurray.application').handlers]):
logging.basicConfig()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
The function must return either a yieldable object or
``None``. If the function returns a yieldable object, the
`IOLoop` will run until the yieldable is resolved (and
`run_sync()` will return the yieldable's result). If it raises
an exception, the `IOLoop` will stop and the exception will be
re-raised to the caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `TimeoutError` is raised.
This method is useful in conjunction with `hurray.server.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
.. versionchanged:: 4.3
Returning a non-``None``, non-yieldable value is now an error.
"""
future_cell = [None]
def run():
try:
result = func()
if result is not None:
from hurray.server.gen import convert_yielded
result = convert_yielded(result)
except Exception:
future_cell[0] = TracebackFuture()
future_cell[0].set_exc_info(sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = TracebackFuture()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback, *args, **kwargs):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs)
elif isinstance(deadline, datetime.timedelta):
return self.call_at(self.time() + timedelta_to_seconds(deadline),
callback, *args, **kwargs)
else:
raise TypeError("Unsupported deadline %r" % deadline)
def call_later(self, delay, callback, *args, **kwargs):
"""Runs the ``callback`` after ``delay`` seconds have passed.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
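        Example (sketch; ``cleanup`` is any zero-argument callable)::

            handle = IOLoop.current().call_later(2.5, cleanup)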
.. versionadded:: 4.0
"""
return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(self, when, callback, *args, **kwargs):
"""Runs the ``callback`` at the absolute time designated by ``when``.
``when`` must be a number using the same reference point as
`IOLoop.time`.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.add_timeout(when, callback, *args, **kwargs)
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def spawn_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next IOLoop iteration.
Unlike all other callback-related methods on IOLoop,
``spawn_callback`` does not associate the callback with its caller's
``stack_context``, so it is suitable for fire-and-forget callbacks
that should not interfere with the caller.
.. versionadded:: 4.0
"""
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
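        Example (sketch; ``pool`` is assumed to be a
        ``concurrent.futures.ThreadPoolExecutor``)::

            future = pool.submit(blocking_call)
            IOLoop.current().add_future(future, lambda f: handle(f.result()))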
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future.add_done_callback(
lambda future: self.add_callback(callback, future))
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
ret = callback()
if ret is not None:
from hurray.server import gen
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
try:
ret = gen.convert_yielded(ret)
except gen.BadYieldError:
# It's not unusual for add_callback to be used with
# methods returning a non-None and non-yieldable
# result, which should just be ignored.
pass
else:
self.add_future(ret, lambda f: f.result())
except Exception:
self.handle_callback_exception(callback)
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
def close_fd(self, fd):
"""Utility method to close an ``fd``.
If ``fd`` is a file-like object, we close it directly; otherwise
we use `os.close`.
This method is provided for use by `IOLoop` subclasses (in
implementations of ``IOLoop.close(all_fds=True)`` and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
try:
fd.close()
except AttributeError:
os.close(fd)
except OSError:
pass
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `hurray.server.platform.epoll.EPollIOLoop`
(Linux), `hurray.server.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`hurray.server.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None, **kwargs):
super(PollIOLoop, self).initialize(**kwargs)
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = []
self._callback_lock = threading.Lock()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
def close(self, all_fds=False):
with self._callback_lock:
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd, handler in self._handlers.values():
self.close_fd(fd)
self._waker.close()
self._impl.close()
self._callbacks = None
self._timeouts = None
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = getattr(IOLoop._current, "instance", None)
IOLoop._current.instance = self
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
# If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHLD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError:
# Non-main thread, or the previous value of wakeup_fd
# is no longer valid.
old_wakeup_fd = None
try:
while True:
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
with self._callback_lock:
callbacks = self._callbacks
self._callbacks = []
# Add any timeouts that have come due to the callback list.
# Do not run anything until we have determined which ones
# are ready, so timeouts that call add_timeout cannot
# schedule anything in this iteration.
due_timeouts = []
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# The timeout was cancelled. Note that the
# cancellation check is repeated below for timeouts
# that are cancelled by another timeout or callback.
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
due_timeouts.append(heapq.heappop(self._timeouts))
else:
break
if (self._cancellations > 512 and
self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
for callback in callbacks:
self._run_callback(callback)
for timeout in due_timeouts:
if timeout.callback is not None:
self._run_callback(timeout.callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
callbacks = callback = due_timeouts = timeout = None
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
elif self._timeouts:
# If there are any timeouts, schedule the first one.
# Use self.time() instead of 'now' to account for time
# spent running callbacks.
poll_timeout = self._timeouts[0].deadline - self.time()
poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
else:
# No timeouts and no callbacks, so use the default.
poll_timeout = _POLL_TIMEOUT
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if errno_from_exception(e) == errno.EINTR:
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that modify self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
fd_obj, handler_func = self._handlers[fd]
handler_func(fd_obj, events)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
self.handle_callback_exception(self._handlers.get(fd))
except Exception:
self.handle_callback_exception(self._handlers.get(fd))
fd_obj = handler_func = None
finally:
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
IOLoop._current.instance = old_current
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def call_at(self, deadline, callback, *args, **kwargs):
timeout = _Timeout(
deadline,
functools.partial(stack_context.wrap(callback), *args, **kwargs),
self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
if thread.get_ident() != self._thread_ident:
# If we're not on the IOLoop's thread, we need to synchronize
# with other threads, or waking logic will induce a race.
with self._callback_lock:
if self._closing:
return
list_empty = not self._callbacks
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if list_empty:
# If we're not in the IOLoop's thread, and we added the
# first callback to an empty list, we may need to wake it
# up (it may wake up on its own, but an occasional extra
# wake is harmless). Waking up a polling IOLoop is
# relatively expensive, so we try to avoid it when we can.
self._waker.wake()
else:
if self._closing:
return
# If we're on the IOLoop's thread, we don't need the lock,
# since we don't need to wake anyone, just add the
# callback. Blindly insert into self._callbacks. This is
# safe even from signal handlers because the GIL makes
# list.append atomic. One subtlety is that if the signal
# is interrupting another thread holding the
# _callback_lock block in IOLoop.start, we may modify
# either the old or new version of self._callbacks, but
# either way will work.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tdeadline']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tdeadline = (deadline, next(io_loop._timeout_counter))
    # Comparison methods to sort by deadline, with a per-loop counter as a
    # tiebreaker to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other):
return self.tdeadline < other.tdeadline
def __le__(self, other):
return self.tdeadline <= other.tdeadline
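    # e.g. two timeouts created with identical deadlines still compare
    # deterministically, because ``tdeadline`` is the tuple
    # (deadline, insertion_counter).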
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
Note that the timeout is given in milliseconds, while most other
time-related functions in Tornado use seconds.
If the callback runs for longer than ``callback_time`` milliseconds,
subsequent invocations will be skipped to get back on schedule.
`start` must be called after the `PeriodicCallback` is created.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def __init__(self, callback, callback_time, io_loop=None):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.io_loop = io_loop or IOLoop.current()
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def is_running(self):
"""Return True if this `.PeriodicCallback` has been started.
.. versionadded:: 4.1
"""
return self._running
def _run(self):
if not self._running:
return
try:
return self.callback()
except Exception:
self.io_loop.handle_callback_exception(self.callback)
finally:
self._schedule_next()
def _schedule_next(self):
if self._running:
current_time = self.io_loop.time()
if self._next_timeout <= current_time:
callback_time_sec = self.callback_time / 1000.0
self._next_timeout += (math.floor((current_time - self._next_timeout) /
callback_time_sec) + 1) * callback_time_sec
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
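# A minimal usage sketch (hypothetical callback; note the interval is in
# milliseconds):
#
#     def heartbeat():
#         print('tick')
#
#     pc = PeriodicCallback(heartbeat, 500)  # run every 0.5 s
#     pc.start()
#     IOLoop.current().start()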
| meteotest/hurray | hurray/server/ioloop.py | Python | bsd-3-clause | 41,487 |
# -*- coding:utf-8 -*-
'''
Given a linked list, print the value of every node from tail to head.
'''
'''
Method 1:
use insert() to prepend each value (stack-like).
Method 2:
use append() and then reverse().
'''
class ListNode:
def __init__(self, x = None):
self.val = x
self.next = None
class Solution_1:
def ListFromTailToHead(self, ListNode):
        if ListNode.val is None:
            return None
        if ListNode.next is None:
            # return a list even for a single node, for a consistent type
            return [ListNode.val]
reverse_list = []
head = ListNode
while head:
            # insert(0, item): index 0 always places the item first, like pushing onto a stack
reverse_list.insert(0, head.val)
head = head.next
return reverse_list
class Solution_2:
def ListFromTailToHead(self, ListNode):
        if ListNode.val is None:
            return None
        if ListNode.next is None:
            # return a list even for a single node, for a consistent type
            return [ListNode.val]
reverse_list = []
head = ListNode
while head:
reverse_list.append(head.val)
head = head.next
reverse_list.reverse()
return reverse_list
# test data
node1 = ListNode(10)
node2 = ListNode(11)
node3 = ListNode(13)
node1.next = node2
node2.next = node3
single_node = ListNode(12)
no_node = ListNode()
test_1 = Solution_1()
test_2 = Solution_2()
print test_1.ListFromTailToHead(node1)
print test_2.ListFromTailToHead(node1)
print test_1.ListFromTailToHead(single_node)
print test_2.ListFromTailToHead(single_node)
print test_1.ListFromTailToHead(no_node)
print test_2.ListFromTailToHead(no_node) | shabbylee/algorithm | 从尾到头打印链表.py | Python | apache-2.0 | 1,593 |
from corehq.apps.app_manager.management.commands.helpers import (
AppMigrationCommandBase,
)
from corehq.apps.app_manager.models import Application
class Command(AppMigrationCommandBase):
help = "Migrate preload dict in advanced forms to " \
"allow loading the same case property into multiple questions."
include_builds = False
def migrate_app(self, app_doc):
modules = [m for m in app_doc['modules'] if m.get('module_type', '') == 'advanced']
should_save = False
for module in modules:
forms = module['forms']
for form in forms:
load_actions = form.get('actions', {}).get('load_update_cases', [])
for action in load_actions:
preload = action['preload']
if preload and list(preload.values())[0].startswith('/'):
action['preload'] = {v: k for k, v in preload.items()}
should_save = True
return Application.wrap(app_doc) if should_save else None
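    # A sketch of the inversion performed above (hypothetical data): the old
    # format {'case_prop': '/data/question1'} becomes
    # {'/data/question1': 'case_prop'}, keyed by question path so the same
    # case property can be loaded into multiple questions.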
| dimagi/commcare-hq | corehq/apps/app_manager/management/commands/migrate_advanced_form_preload.py | Python | bsd-3-clause | 1,053 |
# coding=utf-8
"""Docstring for this file."""
__author__ = 'ismailsunni'
__project_name = 'watchkeeper'
__filename = 'movement'
__date__ = '5/11/15'
__copyright__ = '[email protected]'
__doc__ = ''
from django import forms
from event_mapper.utilities.commons import get_verbose_name, get_help_text
from event_mapper.models.movement import Movement
from event_mapper.models.country import Country
from datetime import datetime
class MovementUpdateForm(forms.Form):
"""A form for rating a movement."""
region = forms.ModelChoiceField(
label='Region',
queryset=Country.objects.order_by('name'),
widget=forms.Select(
attrs={
'class': 'form-control'
}
),
)
risk_level = forms.ChoiceField(
label='New risk level',
widget=forms.Select(
attrs={'class': 'form-control'}),
choices=Movement.RISK_LEVELS,
)
movement_state = forms.ChoiceField(
label='New movement state',
widget=forms.Select(
attrs={'class': 'form-control'}),
choices=Movement.MOVEMENT_STATES,
)
notified_immediately = forms.BooleanField(
label=get_verbose_name(Movement, 'notified_immediately'),
help_text=get_help_text(Movement, 'notified_immediately'),
widget=forms.CheckboxInput(
attrs={'class': 'form-control'}),
required=False
)
notes = forms.CharField(
label=get_verbose_name(Movement, 'notes'),
widget=forms.Textarea(
attrs={'class': 'form-control',
'placeholder': get_help_text(Movement, 'notes')}),
required=False,
)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
self.country_id = kwargs.pop('country_id', None)
super(MovementUpdateForm, self).__init__(*args, **kwargs)
    def save(self, commit=True):
        """Create a movement from the cleaned form data.

        A plain ``forms.Form`` has no ``save()`` to call via ``super()``,
        so the instance is built here directly.
        """
        data = self.cleaned_data
        movement = Movement()
        movement.last_updater = self.user
        movement.country = data['region']
        if commit:
            movement.save()
        return movement
def update(self):
data = self.cleaned_data
country_id = self.cleaned_data['region'].id
country = Country.objects.get(pk=country_id)
if not hasattr(country, 'movement'):
country.movement = Movement()
country.movement.risk_level = data['risk_level']
country.movement.movement_state = data['movement_state']
country.movement.notes = data['notes']
country.movement.last_updater = self.user
country.movement.last_updated_time = datetime.now()
country.movement.save()
country.save()
return country.movement
| MariaSolovyeva/watchkeeper | django_project/event_mapper/forms/movement.py | Python | bsd-2-clause | 2,831 |
from django.forms import ValidationError
from django.test import TestCase
from nap.serialiser import fields
class Mock(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class FieldTestCase(TestCase):
'''
Field cycles:
deflate: digattr -> reduce -> data[name]
inflate: data[name] -> restore -> dest[name]
'''
def test_000_field(self):
data = {}
field = fields.Field()
field.deflate('value', Mock(value='str'), data)
self.assertEqual('str', data['value'])
dest = {}
field.inflate('value', data, dest)
self.assertEqual(dest['value'], data['value'])
def test_000_field_none(self):
'''None is treated specially.'''
data = {}
field = fields.Field()
field.deflate('value', Mock(value=None), data)
self.assertTrue(data['value'] is None)
dest = {}
field.inflate('value', data, dest)
self.assertEqual(data['value'], dest['value'])
def test_000_field_default(self):
'''
With no default set, we can't deflate a field with no value.
'''
data = {}
field = fields.Field()
with self.assertRaises(AttributeError):
field.deflate('value', Mock(), data)
def test_000_readonly(self):
data = {'value': 1}
dest = {}
field = fields.Field(readonly=True)
field.inflate('value', data, dest)
self.assertNotIn('value', dest)
def test_000_nodefault(self):
data = {}
dest = {}
field = fields.Field()
field.inflate('value', data, dest)
self.assertNotIn('value', dest)
def test_000_default(self):
data = {}
dest = {}
field = fields.Field(default=1)
field.inflate('value', data, dest)
self.assertEqual(dest['value'], 1)
def test_000_notnull(self):
data = {'value': None}
dest = {}
field = fields.Field(null=False)
with self.assertRaises(ValidationError):
field.inflate('value', data, dest)
def test_001_boolean(self):
data = {}
field = fields.BooleanField()
field.deflate('value', Mock(value=True), data)
self.assertTrue(data['value'] is True)
dest = {}
field.inflate('value', data, dest)
self.assertTrue(dest['value'] is True)
def test_002_integer(self):
data = {}
field = fields.IntegerField()
field.deflate('value', Mock(value=7), data)
self.assertEqual(data['value'], 7)
dest = {}
field.inflate('value', data, dest)
self.assertEqual(dest['value'], data['value'])
def test_003_decimal(self):
from decimal import Decimal
data = {}
field = fields.DecimalField()
field.deflate('value', Mock(value=Decimal('1.05')), data)
# JS has no Decimal type, only float
self.assertEqual(data['value'], 1.05)
dest = {}
field.inflate('value', data, dest)
self.assertEqual(dest['value'], data['value'])
def test_004_datetime(self):
from datetime import datetime
data = {}
field = fields.DateTimeField()
when = datetime(2010, 11, 5, 12, 7, 19)
field.deflate('value', Mock(value=when), data)
self.assertEqual(data['value'], '2010-11-05 12:07:19')
dest = {}
field.inflate('value', data, dest)
self.assertEqual(dest['value'], when)
def test_005_date(self):
from datetime import date
data = {}
field = fields.DateField()
when = date(2010, 11, 5)
field.deflate('value', Mock(value=when), data)
self.assertEqual(data['value'], '2010-11-05')
dest = {}
field.inflate('value', data, dest)
self.assertEqual(dest['value'], when)
def test_006_time(self):
from datetime import time
data = {}
field = fields.TimeField()
when = time(12, 7, 19)
field.deflate('value', Mock(value=when), data)
self.assertEqual(data['value'], '12:07:19')
dest = {}
field.inflate('value', data, dest)
self.assertEqual(dest['value'], when)
def test_007_serialiser(self):
from nap.serialiser import Serialiser
class SimpleSerialiser(Serialiser):
a = fields.Field()
b = fields.Field()
c = fields.Field()
data = {}
field = fields.SerialiserField(serialiser=SimpleSerialiser())
value = Mock(value=Mock(a=1, b='two', c=3.0))
field.deflate('value', value, data)
self.assertEqual(data['value']['a'], 1)
self.assertEqual(data['value']['b'], 'two')
self.assertEqual(data['value']['c'], 3.0)
| limbera/django-nap | tests/test_fields.py | Python | bsd-3-clause | 4,834 |
import logging
from datetime import timedelta
pygame = None
from rx.internal import PriorityQueue
from rx.concurrency.schedulerbase import SchedulerBase
from rx.concurrency.scheduleditem import ScheduledItem
log = logging.getLogger("Rx")
class PyGameScheduler(SchedulerBase):
"""A scheduler that schedules works for PyGame.
http://www.pygame.org/docs/ref/time.html
http://www.pygame.org/docs/ref/event.html"""
def __init__(self, event_id=None):
global pygame
import pygame
self.timer = None
self.event_id = event_id or pygame.USEREVENT
self.queue = PriorityQueue()
def schedule(self, action, state=None):
"""Schedules an action to be executed."""
log.debug("PyGameScheduler.schedule(state=%s)", state)
return self.schedule_relative(0, action, state)
def run(self):
while len(self.queue):
item = self.queue.peek()
diff = item.duetime - self.now
if diff > timedelta(0):
break
item = self.queue.dequeue()
if not item.is_cancelled():
item.invoke()
def schedule_relative(self, duetime, action, state=None):
"""Schedules an action to be executed after duetime.
Keyword arguments:
duetime -- {timedelta} Relative time after which to execute the action.
action -- {Function} Action to be executed.
Returns {Disposable} The disposable object used to cancel the scheduled
action (best effort)."""
dt = self.now + self.to_timedelta(duetime)
si = ScheduledItem(self, state, action, dt)
self.queue.enqueue(si)
return si.disposable
def schedule_absolute(self, duetime, action, state=None):
"""Schedules an action to be executed at duetime.
Keyword arguments:
duetime -- {datetime} Absolute time after which to execute the action.
action -- {Function} Action to be executed.
Returns {Disposable} The disposable object used to cancel the scheduled
action (best effort)."""
return self.schedule_relative(duetime - self.now, action, state)
@property
def now(self):
"""Represents a notion of time for this scheduler. Tasks being scheduled
on a scheduler will adhere to the time denoted by this property."""
return self.to_datetime(pygame.time.get_ticks())
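# A minimal usage sketch (assumes pygame is available; the callback and the
# ``running`` flag below are illustrative):
#
#     scheduler = PyGameScheduler()
#     scheduler.schedule(lambda scheduler, state: print('hello'))
#     while running:        # inside the pygame main loop
#         scheduler.run()   # invoke any work items that are due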
| Sprytile/Sprytile | rx/concurrency/mainloopscheduler/pygamescheduler.py | Python | mit | 2,426 |
#! /usr/bin/env python
# @author Billy Wilson Arante
# @created 1/7/2016 PHT
# @modified 1/8/2016 PHT
# @description
# A. Encrypts a message using a Caesar cipher (uppercase A-Z only)
# B. Decrypts a message back from its Caesar cipher encryption
def encrypt_word(in_word, shift_val):
enc_word = ""
for char in in_word:
if (ord(char) + shift_val) > 90:
enc_word = enc_word + chr(ord(char) + shift_val - 26)
else:
enc_word = enc_word + chr(ord(char) + shift_val)
return enc_word
def encrypt_msg(in_msg, shift_val):
words = in_msg.split()
enc_words = ""
for word in words:
enc_words = enc_words + encrypt_word(word, shift_val) + " "
return enc_words
def decrypt_word(in_word, shift_val):
dec_word = ""
for char in in_word:
if (ord(char) - shift_val) < 65:
dec_word = dec_word + chr(ord(char) - shift_val + 26)
else:
dec_word = dec_word + chr(ord(char) - shift_val)
return dec_word
def decrypt_msg(in_msg, shift_val):
words = in_msg.split()
dec_words = ""
for word in words:
dec_words = dec_words + decrypt_word(word, shift_val) + " "
return dec_words
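# A compact modulo form of the same shift (a sketch equivalent to the
# branching above; uppercase A-Z only):
def shift_char(char, shift_val):
    """Shift an uppercase letter by shift_val, wrapping within A-Z."""
    return chr((ord(char) - ord('A') + shift_val) % 26 + ord('A'))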
def test():
# Encryption example
print("Encrypted messages:")
enc_msg1 = encrypt_msg("JOIN ME AT EIGHT BY THE ZOO", 2)
print(enc_msg1)
enc_msg2 = encrypt_msg("SPY CODER", 5)
print(enc_msg2)
# Decryption example
print("Decrypted messages:")
dec_msg1 = decrypt_msg(enc_msg1, 2)
print(dec_msg1)
dec_msg2 = decrypt_msg(enc_msg2, 5)
print(dec_msg2)
if __name__ == "__main__":
test()
| arantebillywilson/python-snippets | py3/cs-circle/caesar_cipher.py | Python | mit | 1,613 |
# -*- coding: utf-8 -*-
import numpy as np
from scipy.optimize import leastsq
import matplotlib.pyplot as pl
def func(x, p):
a2, a1, a0 = p
return a2 * x * x + a1 * x + a0
def residuals(p, y, x):
return y - func(x, p)
# hard-coded sample data (set x = [] to read points interactively below)
x = [0, 1, 2, 3, 4, 5, 6, 8]
y = [1, 2, 2, 2, 3, 3, 4, 4]
p = (0, 0, 0)
if len(x) == 0:
    print("Total number of samples:")
    n = int(input())
    for i in range(n):
        x0, y0 = map(int, input().split())
x.append(x0)
y.append(y0)
x = np.array(x)
y = np.array(y)
plsq = leastsq(residuals, p, args=(y, x))
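# plsq[0] holds the fitted coefficients (a2, a1, a0); plsq[1] is scipy's
# integer status flag (values 1-4 indicate a solution was found).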
print(plsq)
pl.scatter(x, y)
pl.plot(x, func(x, plsq[0]), label="fiited curve", marker="*")
pl.legend()
pl.show()
| voidrank/science_python | least_square/leastsq_1.py | Python | mit | 710 |
#!/usr/bin/env python3
import os
import socket
import errno
from time import sleep
from pySilkroadSecurity import SilkroadSecurity
from stream import *
def HandlePacket(security, packet):
r = stream_reader(packet['data'])
if packet['opcode'] == 0x2001:
server = r.read_ascii(r.read_uint16())
if server == 'GatewayServer':
w = stream_writer()
w.write_uint8(18)
w.write_uint16(9)
w.write_ascii('SR_Client')
w.write_uint32(432)
security.Send(0x6100, w.tolist(), True)
elif packet['opcode'] == 0xA100:
security.Send(0x6101, [], True)
elif packet['opcode'] == 0xA101:
entry = r.read_uint8()
while entry == 1:
r.read_uint8()
print(r.read_ascii(r.read_uint16()))
entry = r.read_uint8()
print('')
entry = r.read_uint8()
while entry == 1:
server_id = r.read_uint16()
name = r.read_ascii(r.read_uint16())
capacity = r.read_float()
state = r.read_uint8()
print('[%s] %f' % (name, capacity * 100))
entry = r.read_uint8()
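# The loop below pumps bytes between the socket and SilkroadSecurity: raw
# bytes received go in via Recv(), decoded packets come back out of
# GetPacketToRecv(), and GetPacketToSend() yields encrypted byte lists ready
# for the wire (a sketch of the usage pattern exercised here, not the
# library's full API).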
def main():
security = SilkroadSecurity()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('gwgt1.joymax.com', 15779))
s.setblocking(False)
try:
while 1:
try:
                data = s.recv(8192)
                if not data:
                    # recv() returns b'' when the peer closes the connection
                    break
# Packet received
security.Recv(list(data))
# See if there are packets that can be processed
packet = security.GetPacketToRecv()
if packet is not None:
# Iterate each returned packet
for p in packet:
# Process the packet
HandlePacket(security, p)
# See if there are packets to be sent
packet = security.GetPacketToSend()
# Send each packet in the list
if packet is not None:
for p in packet:
data = bytes(p)
while data:
sent = s.send(data)
data = data[sent:]
except socket.error as e:
if e.errno == errno.EWOULDBLOCK:
sleep(0.01)
else:
raise e
    except KeyboardInterrupt:
        pass
# Cleanup
s.shutdown(socket.SHUT_RDWR)
s.close()
return 0
if __name__ == '__main__':
os._exit(main()) | ProjectHax/pySilkroadSecurity | python/pySilkroadStats.py | Python | unlicense | 2,038 |
# -*- coding: utf-8 -*-
'''
================================================================================
This confidential and proprietary software may be used only
as authorized by a licensing agreement from Thumb o'Cat Inc.
In the event of publication, the following notice is applicable:
Copyright (C) 2013 - 2014 Thumb o'Cat
All right reserved.
The entire notice above must be reproduced on all authorized copies.
================================================================================
File : libvio.py
Author(s) : Luuvish
Version : 2.0
Revision :
1.0 Aug 28, 2013 first release
2.0 May 12, 2014 Executor classify
================================================================================
'''
__all__ = ('LibVio', )
__version__ = '2.0.0'
from . import rootpath, ModelExecutor
class LibVio(ModelExecutor):
model = 'libvio'
codecs = ('h264', )
actions = ('decode', 'digest_by_frames', 'compare')
def __init__(self, codec, **kwargs):
from os.path import join
super(LibVio, self).__init__(codec, **kwargs)
binary = 'bin/libvio'
self._execute = join(rootpath, binary)
self.defaults['digest'] += ['-5']
def execute(self):
return self._execute
def options(self, source, target):
return ['-i', source, '-o', target]
def decode(self, source, target):
from os import remove
super(LibVio, self).decode(source, target)
remove('dataDec.txt')
remove('log.dec')
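# A minimal usage sketch (paths are illustrative; extra keyword arguments
# follow ModelExecutor's interface):
#
#     vio = LibVio('h264')
#     vio.decode('stream.264', 'out.yuv')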
| luuvish/libvio | script/test/model/libvio.py | Python | mit | 1,572 |
import xml.etree.ElementTree as ET
from stats import parsers
from django.conf import settings
from pytvdbapi import api
import requests
def query_service(url, headers={}, payload={}):
"""
A generalised function that handles making requests and
injecting a user agent header.
No assumption is made about the Content-Type being returned.
You'll need to perform any required parsing in the sources/parser
function (eg json.loads or xml.etree.ElementTree)
    :param url: A string containing the URL to be requested.
    :param headers: A dictionary containing any other required headers.
    :param payload: A dictionary of data to send; a POST is made when it
        is non-empty, otherwise a GET.
    :return: A string or dictionary.
"""
headers['User-Agent'] = settings.USER_AGENT
if bool(payload):
r = requests.post(url, headers=headers, data=payload)
else:
r = requests.get(url, headers=headers)
if 'json' in headers['Content-Type']:
return r.json()
return r.text
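# A minimal usage sketch (hypothetical endpoint):
#
#     data = query_service('https://example.com/api/items',
#                          headers={'Content-Type': 'application/json'})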
def book_covers(title):
url = ('https://goodreads.com/search/index.xml?key={0}'
'&q={1}'.format(settings.GOODREADS, title))
headers = {'Content-Type': 'application/xml'}
try:
data = query_service(url, headers)
root = ET.fromstring(data)
cover = root[1][6][0][8][3].text
img = 'https://images.gr-assets.com/books/{}/{}'.format(
cover.split('/')[4].replace('m', 'l'),
cover.split('/')[5]
)
except Exception:
img = 'https://static.thingsima.de/shared/img/no_cover.png'
return img
def film_covers(tmdb):
"""
This function fetches film posters, sometimes called covers,
from TheMovieDB.
In the event that a cover can't be found, a local placeholder
will be used instead.
I've never actually had it trigger though
since film posters are seemingly always available.
:param tmdb: A string containing an ID for TheMovieDB entry.
:return: A string containing a URL to an image.
"""
url = ('https://api.themoviedb.org/3/movie/{}/images'
'?api_key={}'.format(tmdb, settings.TMDB))
headers = {'Content-Type': 'application/json'}
try:
data = query_service(url, headers)
poster = data['posters'][0]['file_path']
img = 'https://image.tmdb.org/t/p/w500{}'.format(poster)
except Exception:
img = 'https://static.thingsima.de/shared/img/no_cover.png'
return img
def game_data(title):
"""
This function fetches game cover art and other data from Giant Bomb.
It assumes that the first result which has the resource_type of game
is going to be the correct entry.
:param title: A string containing the name of a videogame.
:return A dictionary containing a game name, image, id and release year
"""
url = ('https://www.giantbomb.com/api/search?query={0}'
'&api_key={1}&format=json'.format(title, settings.GIANTBOMB))
headers = {'Content-Type': 'application/json'}
game = {}
try:
data = query_service(url, headers)
entries = data['results']
entry = list(filter(lambda x: x['resource_type'] == 'game', entries))[0]
game['img'] = entry['image']['super_url']
game['link'] = entry['site_detail_url']
game['name'] = entry['name']
        game['year'] = int(entry['original_release_date'][0:4])
except Exception:
game['img'] = 'https://static.thingsima.de/shared/img/no_cover.png'
return game
def show_covers(season, number, series):
"""
This function fetches show screenshots, called covers in this
instance. You can think of them as thumbnails but I needed a
word that could be generalised for movies and TV show images.
In the event that a cover can't be found, a local placeholder
will be used instead.
This often triggers for recently aired shows but it's usually
fixed within a day or so. You'll probably find anime is the
most lacking since there's less eyeballs on fresh seasonal
releases compared to eg; a Netflix series.
:param season: A string containing the season number of the
requested episode.
:param number: A string containing the episode number of the
requested episode.
:param series: A string containing the name of the requested
show.
:return: A string containing a URL to an image.
"""
tvdb = api.TVDB(settings.TVDB)
result = tvdb.search(series, 'en')
try:
show = result[0]
url = show[season][number].filename
if url != '':
img = 'https://www.thetvdb.com/banners/{}'.format(url)
else:
raise Exception
except Exception:
img = 'https://static.thingsima.de/shared/img/no_still.png'
return img
def books():
"""
Calling this kicks off everything required to store recently
read books in the database.
:return: N/A
"""
    url = ('https://www.goodreads.com/review/list?'
           'shelf=currently-reading&key={0}&id={1}'
           '&v=2'.format(settings.GOODREADS, settings.GOODREADS_ID))
headers = {'Content-Type': 'application/xml'}
data = query_service(url, headers)
root = ET.fromstring(data)[1]
parsers.goodreads(root)
def games():
"""
Calling this kicks off everything required to store recently
played games in the database.
:return: N/A
"""
payload = {'n': settings.HLTB, 'playing': '1'}
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
url = 'https://howlongtobeat.com/user_games_list.php'
data = query_service(url, headers=headers, payload=payload)
parsers.howlongtobeat(data)
def movies():
"""
Calling this kicks off everything required to store recently
watched movies in the database.
:return: N/A
"""
url = 'https://api.trakt.tv/users/sentry/history/movies'
headers = {'Content-Type': 'application/json',
'trakt-api-version': '2',
'trakt-api-key': settings.TRAKT}
data = query_service(url, headers)
parsers.trakt_movies(data)
def music():
"""
Calling this kicks off everything required to store recently
listened music in the database.
:return: N/A
"""
url = ('http://ws.audioscrobbler.com/2.0/?'
'method=user.getrecenttracks'
'&user=sentryism&api_key={}'
'&format=json&limit=10'.format(settings.LASTFM))
headers = {'Content-Type': 'application/json'}
data = query_service(url, headers)
parsers.lastfm(data)
def shows():
"""
Calling this kicks off everything required to store recently
watched TV series in the database.
:return: N/A
"""
url = 'https://api.trakt.tv/users/sentry/history/episodes'
headers = {'Content-Type': 'application/json',
'trakt-api-version': '2',
'trakt-api-key': settings.TRAKT}
data = query_service(url, headers)
parsers.trakt_shows(data)
| marcus-crane/site | site/stats/sources.py | Python | mit | 7,067 |
import time
import bbio
import numpy as np
ULTRAS = ((bbio.GPIO1_12, bbio.GPIO1_13),
(bbio.GPIO0_3, bbio.GPIO0_2),
(bbio.GPIO1_17, bbio.GPIO0_15),
(bbio.GPIO3_21, bbio.GPIO0_14),
(bbio.GPIO3_19, bbio.GPIO3_16),)
# trigger duration
DECPULSETRIGGER = 0.0001
# loop iterations before timeout called
INTTIMEOUT = 2100
def setup():
print "setup..."
for trigger, echo in ULTRAS:
bbio.pinMode(echo, bbio.INPUT)
bbio.pinMode(trigger, bbio.OUTPUT)
bbio.digitalWrite(trigger, bbio.LOW)
def measure(trigger, echo):
bbio.digitalWrite(trigger, bbio.HIGH)
time.sleep(DECPULSETRIGGER)
bbio.digitalWrite(trigger, bbio.LOW)
# Wait for echo to go high (or timeout)
intcountdown = INTTIMEOUT
while (bbio.digitalRead(echo) == 0 and intcountdown > 0):
intcountdown = intcountdown - 1
# If echo is high
if intcountdown > 0:
# Start timer and init timeout countdown
echostart = time.time()
intcountdown = INTTIMEOUT
# Wait for echo to go low (or timeout)
while (bbio.digitalRead(echo) == 1 and intcountdown > 0):
intcountdown = intcountdown - 1
# Stop timer
echoend = time.time()
# Echo duration
echoduration = echoend - echostart
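        # HC-SR04-style conversion: sound covers ~1 cm per 29 µs, and the
        # echo travels the distance twice, hence ~58 µs per centimetre.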
intdistance = (echoduration*1000000) / 58.0
return intdistance
def loop():
print "running..."
data = [[], [], [], [], []]
count = 10
for i in range(count):
i = 0
for trigger, echo in ULTRAS:
dist = measure(trigger, echo)
data[i].append(dist)
i += 1
sen = ('fm', 'br', 'fl', 'fr', 'bl')
for idx, i in enumerate(data):
mean = np.mean(i)
print "%s: mean %.2f cm min: %.2f cm max: %.2f cm diff: %.2f cm" % (
sen[idx], mean, min(i), max(i), max(i) - min(i))
bbio.stop()
bbio.run(setup, loop)
| delijati/ultrabot | test/ultra_pybbio.py | Python | mit | 1,932 |
# -*- coding: utf-8 -*-
"""
Load Cell 2.0 Plugin
Copyright (C) 2018 Olaf Lüke <[email protected]>
__init__.py: package initialization
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from brickv.plugin_system.plugins.load_cell_v2.load_cell_v2 import LoadCellV2
device_class = LoadCellV2
| Tinkerforge/brickv | src/brickv/plugin_system/plugins/load_cell_v2/__init__.py | Python | gpl-2.0 | 926 |
# -*- coding: utf-8 -*-
"""
Tests of responsetypes
"""
from datetime import datetime
import json
import os
import pyparsing
import random
import unittest
import textwrap
import requests
import mock
from . import new_loncapa_problem, test_capa_system
import calc
from capa.responsetypes import LoncapaProblemError, \
StudentInputError, ResponseError
from capa.correctmap import CorrectMap
from capa.util import convert_files_to_filenames
from capa.xqueue_interface import dateformat
from pytz import UTC
class ResponseTest(unittest.TestCase):
"""Base class for tests of capa responses."""
xml_factory_class = None
# If something is wrong, show it to us.
maxDiff = None
def setUp(self):
if self.xml_factory_class:
self.xml_factory = self.xml_factory_class()
def build_problem(self, capa_system=None, **kwargs):
xml = self.xml_factory.build_xml(**kwargs)
return new_loncapa_problem(xml, capa_system=capa_system)
def assert_grade(self, problem, submission, expected_correctness, msg=None):
input_dict = {'1_2_1': submission}
correct_map = problem.grade_answers(input_dict)
if msg is None:
self.assertEquals(correct_map.get_correctness('1_2_1'), expected_correctness)
else:
self.assertEquals(correct_map.get_correctness('1_2_1'), expected_correctness, msg)
def assert_answer_format(self, problem):
answers = problem.get_question_answers()
self.assertTrue(answers['1_2_1'] is not None)
def assert_multiple_grade(self, problem, correct_answers, incorrect_answers):
for input_str in correct_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
self.assertEqual(result, 'correct')
for input_str in incorrect_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
self.assertEqual(result, 'incorrect')
def _get_random_number_code(self):
"""Returns code to be used to generate a random result."""
return "str(random.randint(0, 1e9))"
def _get_random_number_result(self, seed_value):
"""Returns a result that should be generated using the random_number_code."""
rand = random.Random(seed_value)
return str(rand.randint(0, 1e9))
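    # e.g. _get_random_number_result(42) is intended to mirror the value a
    # problem seeded with 42 would produce from _get_random_number_code().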
class MultiChoiceResponseTest(ResponseTest):
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
xml_factory_class = MultipleChoiceResponseXMLFactory
def test_multiple_choice_grade(self):
problem = self.build_problem(choices=[False, True, False])
# Ensure that we get the expected grades
self.assert_grade(problem, 'choice_0', 'incorrect')
self.assert_grade(problem, 'choice_1', 'correct')
self.assert_grade(problem, 'choice_2', 'incorrect')
def test_named_multiple_choice_grade(self):
problem = self.build_problem(choices=[False, True, False],
choice_names=["foil_1", "foil_2", "foil_3"])
# Ensure that we get the expected grades
self.assert_grade(problem, 'choice_foil_1', 'incorrect')
self.assert_grade(problem, 'choice_foil_2', 'correct')
self.assert_grade(problem, 'choice_foil_3', 'incorrect')
class TrueFalseResponseTest(ResponseTest):
from capa.tests.response_xml_factory import TrueFalseResponseXMLFactory
xml_factory_class = TrueFalseResponseXMLFactory
def test_true_false_grade(self):
problem = self.build_problem(choices=[False, True, True])
# Check the results
# Mark correct if and only if ALL (and only) correct choices selected
self.assert_grade(problem, 'choice_0', 'incorrect')
self.assert_grade(problem, 'choice_1', 'incorrect')
self.assert_grade(problem, 'choice_2', 'incorrect')
self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2'], 'incorrect')
self.assert_grade(problem, ['choice_0', 'choice_2'], 'incorrect')
self.assert_grade(problem, ['choice_0', 'choice_1'], 'incorrect')
self.assert_grade(problem, ['choice_1', 'choice_2'], 'correct')
# Invalid choices should be marked incorrect (we have no choice 3)
self.assert_grade(problem, 'choice_3', 'incorrect')
self.assert_grade(problem, 'not_a_choice', 'incorrect')
def test_named_true_false_grade(self):
problem = self.build_problem(choices=[False, True, True],
choice_names=['foil_1', 'foil_2', 'foil_3'])
# Check the results
        # Mark correct if and only if ALL (and only) correct choices selected
self.assert_grade(problem, 'choice_foil_1', 'incorrect')
self.assert_grade(problem, 'choice_foil_2', 'incorrect')
self.assert_grade(problem, 'choice_foil_3', 'incorrect')
self.assert_grade(problem, ['choice_foil_1', 'choice_foil_2', 'choice_foil_3'], 'incorrect')
self.assert_grade(problem, ['choice_foil_1', 'choice_foil_3'], 'incorrect')
self.assert_grade(problem, ['choice_foil_1', 'choice_foil_2'], 'incorrect')
self.assert_grade(problem, ['choice_foil_2', 'choice_foil_3'], 'correct')
# Invalid choices should be marked incorrect
self.assert_grade(problem, 'choice_foil_4', 'incorrect')
self.assert_grade(problem, 'not_a_choice', 'incorrect')
class ImageResponseTest(ResponseTest):
from capa.tests.response_xml_factory import ImageResponseXMLFactory
xml_factory_class = ImageResponseXMLFactory
def test_rectangle_grade(self):
# Define a rectangle with corners (10,10) and (20,20)
problem = self.build_problem(rectangle="(10,10)-(20,20)")
# Anything inside the rectangle (and along the borders) is correct
# Everything else is incorrect
correct_inputs = ["[12,19]", "[10,10]", "[20,20]",
"[10,15]", "[20,15]", "[15,10]", "[15,20]"]
incorrect_inputs = ["[4,6]", "[25,15]", "[15,40]", "[15,4]"]
self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs)
def test_multiple_rectangles_grade(self):
# Define two rectangles
rectangle_str = "(10,10)-(20,20);(100,100)-(200,200)"
# Expect that only points inside the rectangles are marked correct
problem = self.build_problem(rectangle=rectangle_str)
correct_inputs = ["[12,19]", "[120, 130]"]
incorrect_inputs = ["[4,6]", "[25,15]", "[15,40]", "[15,4]",
"[50,55]", "[300, 14]", "[120, 400]"]
self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs)
def test_region_grade(self):
# Define a triangular region with corners (0,0), (5,10), and (0, 10)
region_str = "[ [1,1], [5,10], [0,10] ]"
# Expect that only points inside the triangle are marked correct
problem = self.build_problem(regions=region_str)
correct_inputs = ["[2,4]", "[1,3]"]
incorrect_inputs = ["[0,0]", "[3,5]", "[5,15]", "[30, 12]"]
self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs)
def test_multiple_regions_grade(self):
# Define multiple regions that the user can select
region_str = "[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"
# Expect that only points inside the regions are marked correct
problem = self.build_problem(regions=region_str)
correct_inputs = ["[15,12]", "[110,112]"]
incorrect_inputs = ["[0,0]", "[600,300]"]
self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs)
def test_region_and_rectangle_grade(self):
rectangle_str = "(100,100)-(200,200)"
region_str = "[[10,10], [20,10], [20, 30]]"
# Expect that only points inside the rectangle or region are marked correct
problem = self.build_problem(regions=region_str, rectangle=rectangle_str)
correct_inputs = ["[13,12]", "[110,112]"]
incorrect_inputs = ["[0,0]", "[600,300]"]
self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs)
def test_show_answer(self):
rectangle_str = "(100,100)-(200,200)"
region_str = "[[10,10], [20,10], [20, 30]]"
problem = self.build_problem(regions=region_str, rectangle=rectangle_str)
self.assert_answer_format(problem)
class SymbolicResponseTest(ResponseTest):
from capa.tests.response_xml_factory import SymbolicResponseXMLFactory
xml_factory_class = SymbolicResponseXMLFactory
def test_grade_single_input_correct(self):
problem = self.build_problem(math_display=True, expect="2*x+3*y")
# Correct answers
correct_inputs = [
('2x+3y', textwrap.dedent("""
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true">
<mn>2</mn><mo>*</mo><mi>x</mi><mo>+</mo><mn>3</mn><mo>*</mo><mi>y</mi>
</mstyle></math>"""),
'snuggletex_2x+3y.xml'),
('x+x+3y', textwrap.dedent("""
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true">
<mi>x</mi><mo>+</mo><mi>x</mi><mo>+</mo><mn>3</mn><mo>*</mo><mi>y</mi>
</mstyle></math>"""),
'snuggletex_x+x+3y.xml'),
]
for (input_str, input_mathml, server_fixture) in correct_inputs:
print "Testing input: {0}".format(input_str)
server_resp = self._load_fixture(server_fixture)
self._assert_symbolic_grade(
problem, input_str, input_mathml,
'correct', snuggletex_resp=server_resp
)
def test_grade_single_input_incorrect(self):
problem = self.build_problem(math_display=True, expect="2*x+3*y")
# Incorrect answers
incorrect_inputs = [
('0', ''),
('4x+3y', textwrap.dedent("""
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true">
<mn>4</mn><mo>*</mo><mi>x</mi><mo>+</mo><mn>3</mn><mo>*</mo><mi>y</mi>
</mstyle></math>""")),
]
for (input_str, input_mathml) in incorrect_inputs:
self._assert_symbolic_grade(problem, input_str, input_mathml, 'incorrect')
def test_complex_number_grade_correct(self):
problem = self.build_problem(
math_display=True,
expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
options=["matrix", "imaginary"]
)
correct_snuggletex = self._load_fixture('snuggletex_correct.html')
dynamath_input = self._load_fixture('dynamath_input.txt')
student_response = "cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]"
self._assert_symbolic_grade(
problem, student_response, dynamath_input,
'correct',
snuggletex_resp=correct_snuggletex
)
def test_complex_number_grade_incorrect(self):
problem = self.build_problem(math_display=True,
expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
options=["matrix", "imaginary"])
wrong_snuggletex = self._load_fixture('snuggletex_wrong.html')
dynamath_input = textwrap.dedent("""
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true"><mn>2</mn></mstyle>
</math>
""")
self._assert_symbolic_grade(
problem, "2", dynamath_input,
'incorrect',
snuggletex_resp=wrong_snuggletex,
)
def test_multiple_inputs_exception(self):
# Should not allow multiple inputs, since we specify
# only one "expect" value
with self.assertRaises(Exception):
self.build_problem(math_display=True, expect="2*x+3*y", num_inputs=3)
def _assert_symbolic_grade(
self, problem, student_input, dynamath_input, expected_correctness,
snuggletex_resp=""
):
"""
Assert that the symbolic response has a certain grade.
`problem` is the capa problem containing the symbolic response.
`student_input` is the text the student entered.
`dynamath_input` is the JavaScript rendered MathML from the page.
`expected_correctness` is either "correct" or "incorrect"
`snuggletex_resp` is the simulated response from the Snuggletex server
"""
input_dict = {'1_2_1': str(student_input),
'1_2_1_dynamath': str(dynamath_input)}
# Simulate what the Snuggletex server would respond
with mock.patch.object(requests, 'post') as mock_post:
mock_post.return_value.text = snuggletex_resp
correct_map = problem.grade_answers(input_dict)
self.assertEqual(
correct_map.get_correctness('1_2_1'), expected_correctness
)
@staticmethod
def _load_fixture(relpath):
"""
Return a `unicode` object representing the contents
of the fixture file at `relpath` (relative to the test files dir)
"""
abspath = os.path.join(os.path.dirname(__file__), 'test_files', relpath)
with open(abspath) as fixture_file:
contents = fixture_file.read()
return contents.decode('utf8')
class OptionResponseTest(ResponseTest):
from capa.tests.response_xml_factory import OptionResponseXMLFactory
xml_factory_class = OptionResponseXMLFactory
def test_grade(self):
problem = self.build_problem(options=["first", "second", "third"],
correct_option="second")
# Assert that we get the expected grades
self.assert_grade(problem, "first", "incorrect")
self.assert_grade(problem, "second", "correct")
self.assert_grade(problem, "third", "incorrect")
# Options not in the list should be marked incorrect
self.assert_grade(problem, "invalid_option", "incorrect")
def test_quote_option(self):
# Test that option response properly escapes quotes inside options strings
problem = self.build_problem(options=["hasnot", "hasn't", "has'nt"],
correct_option="hasn't")
# Assert that correct option with a quote inside is marked correctly
self.assert_grade(problem, "hasnot", "incorrect")
self.assert_grade(problem, "hasn't", "correct")
self.assert_grade(problem, "hasn\'t", "correct")
self.assert_grade(problem, "has'nt", "incorrect")
class FormulaResponseTest(ResponseTest):
"""
Test the FormulaResponse class
"""
from capa.tests.response_xml_factory import FormulaResponseXMLFactory
xml_factory_class = FormulaResponseXMLFactory
def test_grade(self):
"""
Test basic functionality of FormulaResponse
Specifically, if it can understand equivalence of formulae
"""
# Sample variables x and y in the range [-10, 10]
sample_dict = {'x': (-10, 10), 'y': (-10, 10)}
# The expected solution is numerically equivalent to x+2y
problem = self.build_problem(sample_dict=sample_dict,
num_samples=10,
tolerance=0.01,
answer="x+2*y")
# Expect an equivalent formula to be marked correct
# 2x - x + y + y = x + 2y
input_formula = "2*x - x + y + y"
self.assert_grade(problem, input_formula, "correct")
# Expect an incorrect formula to be marked incorrect
# x + y != x + 2y
input_formula = "x + y"
self.assert_grade(problem, input_formula, "incorrect")
def test_hint(self):
"""
Test the hint-giving functionality of FormulaResponse
"""
# Sample variables x and y in the range [-10, 10]
sample_dict = {'x': (-10, 10), 'y': (-10, 10)}
# Give a hint if the user leaves off the coefficient
# or leaves out x
hints = [('x + 3*y', 'y_coefficient', 'Check the coefficient of y'),
('2*y', 'missing_x', 'Try including the variable x')]
# The expected solution is numerically equivalent to x+2y
problem = self.build_problem(sample_dict=sample_dict,
num_samples=10,
tolerance=0.01,
answer="x+2*y",
hints=hints)
# Expect to receive a hint if we add an extra y
input_dict = {'1_2_1': "x + 2*y + y"}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'),
'Check the coefficient of y')
# Expect to receive a hint if we leave out x
input_dict = {'1_2_1': "2*y"}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'),
'Try including the variable x')
def test_script(self):
"""
Test if python script can be used to generate answers
"""
# Calculate the answer using a script
script = "calculated_ans = 'x+x'"
# Sample x in the range [-10,10]
sample_dict = {'x': (-10, 10)}
# The expected solution is numerically equivalent to 2*x
problem = self.build_problem(sample_dict=sample_dict,
num_samples=10,
tolerance=0.01,
answer="$calculated_ans",
script=script)
# Expect that the inputs are graded correctly
self.assert_grade(problem, '2*x', 'correct')
self.assert_grade(problem, '3*x', 'incorrect')
def test_grade_infinity(self):
"""
Test that a large input on a problem with relative tolerance isn't
erroneously marked as correct.
"""
sample_dict = {'x': (1, 2)}
# Test problem
problem = self.build_problem(sample_dict=sample_dict,
num_samples=10,
tolerance="1%",
answer="x")
# Expect such a large answer to be marked incorrect
input_formula = "x*1e999"
self.assert_grade(problem, input_formula, "incorrect")
# Expect such a large negative answer to be marked incorrect
input_formula = "-x*1e999"
self.assert_grade(problem, input_formula, "incorrect")
def test_grade_nan(self):
"""
Test that expressions that evaluate to NaN are not marked as correct.
"""
sample_dict = {'x': (1, 2)}
# Test problem
problem = self.build_problem(sample_dict=sample_dict,
num_samples=10,
tolerance="1%",
answer="x")
# Expect an incorrect answer (+ nan) to be marked incorrect
# Right now this evaluates to 'nan' for a given x (Python implementation-dependent)
input_formula = "10*x + 0*1e999"
self.assert_grade(problem, input_formula, "incorrect")
# Expect an correct answer (+ nan) to be marked incorrect
input_formula = "x + 0*1e999"
self.assert_grade(problem, input_formula, "incorrect")
def test_raises_zero_division_err(self):
"""
See if division by zero raises an error.
"""
sample_dict = {'x': (1, 2)}
problem = self.build_problem(sample_dict=sample_dict,
num_samples=10,
tolerance="1%",
answer="x") # Answer doesn't matter
input_dict = {'1_2_1': '1/0'}
self.assertRaises(StudentInputError, problem.grade_answers, input_dict)
def test_validate_answer(self):
"""
Makes sure that validate_answer works.
"""
sample_dict = {'x': (1, 2)}
problem = self.build_problem(
sample_dict=sample_dict,
num_samples=10,
tolerance="1%",
answer="x"
)
self.assertTrue(problem.responders.values()[0].validate_answer('14*x'))
self.assertFalse(problem.responders.values()[0].validate_answer('3*y+2*x'))
class StringResponseTest(ResponseTest):
from capa.tests.response_xml_factory import StringResponseXMLFactory
xml_factory_class = StringResponseXMLFactory
def test_backward_compatibility_for_multiple_answers(self):
"""
Remove this test, once support for _or_ separator will be removed.
"""
answers = ["Second", "Third", "Fourth"]
problem = self.build_problem(answer="_or_".join(answers), case_sensitive=True)
for answer in answers:
# Exact string should be correct
self.assert_grade(problem, answer, "correct")
# Other strings and the lowercase version of the string are incorrect
self.assert_grade(problem, "Other String", "incorrect")
problem = self.build_problem(answer="_or_".join(answers), case_sensitive=False)
for answer in answers:
# Exact string should be correct
self.assert_grade(problem, answer, "correct")
self.assert_grade(problem, answer.lower(), "correct")
self.assert_grade(problem, "Other String", "incorrect")
def test_regexp(self):
problem = self.build_problem(answer="Second", case_sensitive=False, regexp=True)
self.assert_grade(problem, "Second", "correct")
problem = self.build_problem(answer="sec", case_sensitive=False, regexp=True)
self.assert_grade(problem, "Second", "incorrect")
problem = self.build_problem(answer="sec.*", case_sensitive=False, regexp=True)
self.assert_grade(problem, "Second", "correct")
problem = self.build_problem(answer="sec.*", case_sensitive=True, regexp=True)
self.assert_grade(problem, "Second", "incorrect")
problem = self.build_problem(answer="Sec.*$", case_sensitive=False, regexp=True)
self.assert_grade(problem, "Second", "correct")
problem = self.build_problem(answer="^sec$", case_sensitive=False, regexp=True)
self.assert_grade(problem, "Second", "incorrect")
problem = self.build_problem(answer="^Sec(ond)?$", case_sensitive=False, regexp=True)
self.assert_grade(problem, "Second", "correct")
problem = self.build_problem(answer="^Sec(ond)?$", case_sensitive=False, regexp=True)
self.assert_grade(problem, "Sec", "correct")
problem = self.build_problem(answer="tre+", case_sensitive=False, regexp=True)
self.assert_grade(problem, "There is a tree", "incorrect")
problem = self.build_problem(answer=".*tre+", case_sensitive=False, regexp=True)
self.assert_grade(problem, "There is a tree", "correct")
# test with case_sensitive not specified
problem = self.build_problem(answer=".*tre+", regexp=True)
self.assert_grade(problem, "There is a tree", "correct")
answers = [
"Martin Luther King Junior",
"Doctor Martin Luther King Junior",
"Dr. Martin Luther King Jr.",
"Martin Luther King"
]
problem = self.build_problem(answer="\w*\.?.*Luther King\s*.*", case_sensitive=True, regexp=True)
for answer in answers:
self.assert_grade(problem, answer, "correct")
problem = self.build_problem(answer="^(-\|){2,5}$", case_sensitive=False, regexp=True)
self.assert_grade(problem, "-|-|-|", "correct")
self.assert_grade(problem, "-|", "incorrect")
self.assert_grade(problem, "-|-|-|-|-|-|", "incorrect")
regexps = [
"^One$",
"two",
"^thre+",
"^4|Four$",
]
problem = self.build_problem(
answer="just_sample",
case_sensitive=False,
regexp=True,
additional_answers=regexps
)
self.assert_grade(problem, "One", "correct")
self.assert_grade(problem, "two", "correct")
self.assert_grade(problem, "!!two!!", "correct")
self.assert_grade(problem, "threeeee", "correct")
self.assert_grade(problem, "three", "correct")
self.assert_grade(problem, "4", "correct")
self.assert_grade(problem, "Four", "correct")
self.assert_grade(problem, "Five", "incorrect")
self.assert_grade(problem, "|", "incorrect")
# test unicode
problem = self.build_problem(answer=u"æ", case_sensitive=False, regexp=True, additional_answers=[u'ö'])
self.assert_grade(problem, u"æ", "correct")
self.assert_grade(problem, u"ö", "correct")
self.assert_grade(problem, u"î", "incorrect")
self.assert_grade(problem, u"o", "incorrect")
def test_backslash_and_unicode_regexps(self):
"""
        Test some special cases of [unicode] regexps.
        One needs to use either r'' strings or write the real `repr` of unicode strings, because of the following
        (from the Python docs, http://docs.python.org/2/library/re.html):
        "for example, to match a literal backslash, one might have to write '\\\\' as the pattern string,
        because the regular expression must be \\,
        and each backslash must be expressed as \\ inside a regular Python string literal."
        Example of a real use case in Studio:
        a) the user inputs a regexp in the usual regexp language,
        b) the regexp is saved to XML and is read back in Python as the repr of that string.
        So a\d in the front-end editor becomes a\\\\d in the XML, and will match a1 as a student answer.
"""
problem = self.build_problem(answer=ur"5\\æ", case_sensitive=False, regexp=True)
self.assert_grade(problem, u"5\æ", "correct")
problem = self.build_problem(answer=u"5\\\\æ", case_sensitive=False, regexp=True)
self.assert_grade(problem, u"5\æ", "correct")
def test_backslash(self):
problem = self.build_problem(answer=u"a\\\\c1", case_sensitive=False, regexp=True)
self.assert_grade(problem, u"a\c1", "correct")
def test_special_chars(self):
problem = self.build_problem(answer=ur"a \s1", case_sensitive=False, regexp=True)
self.assert_grade(problem, u"a 1", "correct")
def test_case_sensitive(self):
# Test single answer
problem_specified = self.build_problem(answer="Second", case_sensitive=True)
# should also be case_sensitive if case sensitivity is not specified
problem_not_specified = self.build_problem(answer="Second")
problems = [problem_specified, problem_not_specified]
for problem in problems:
# Exact string should be correct
self.assert_grade(problem, "Second", "correct")
# Other strings and the lowercase version of the string are incorrect
self.assert_grade(problem, "Other String", "incorrect")
self.assert_grade(problem, "second", "incorrect")
# Test multiple answers
answers = ["Second", "Third", "Fourth"]
# set up problems
problem_specified = self.build_problem(
answer="sample_answer", case_sensitive=True, additional_answers=answers
)
problem_not_specified = self.build_problem(
answer="sample_answer", additional_answers=answers
)
problems = [problem_specified, problem_not_specified]
for problem in problems:
for answer in answers:
# Exact string should be correct
self.assert_grade(problem, answer, "correct")
# Other strings and the lowercase version of the string are incorrect
self.assert_grade(problem, "Other String", "incorrect")
self.assert_grade(problem, "second", "incorrect")
def test_bogus_escape_not_raised(self):
"""
        We now add ^ and $ around the regexp, so no bogus escape error will be raised.
"""
problem = self.build_problem(answer=u"\\", case_sensitive=False, regexp=True)
self.assert_grade(problem, u"\\", "incorrect")
# right way to search for \
problem = self.build_problem(answer=u"\\\\", case_sensitive=False, regexp=True)
self.assert_grade(problem, u"\\", "correct")
def test_case_insensitive(self):
# Test single answer
problem = self.build_problem(answer="Second", case_sensitive=False)
# Both versions of the string should be allowed, regardless
# of capitalization
self.assert_grade(problem, "Second", "correct")
self.assert_grade(problem, "second", "correct")
# Other strings are not allowed
self.assert_grade(problem, "Other String", "incorrect")
# Test multiple answers
answers = ["Second", "Third", "Fourth"]
problem = self.build_problem(answer="sample_answer", case_sensitive=False, additional_answers=answers)
for answer in answers:
# Exact string should be correct
self.assert_grade(problem, answer, "correct")
self.assert_grade(problem, answer.lower(), "correct")
# Other strings and the lowercase version of the string are incorrect
self.assert_grade(problem, "Other String", "incorrect")
def test_partial_matching(self):
problem = self.build_problem(answer="a2", case_sensitive=False, regexp=True, additional_answers=['.?\\d.?'])
self.assert_grade(problem, "a3", "correct")
self.assert_grade(problem, "3a", "correct")
def test_exception(self):
problem = self.build_problem(answer="a2", case_sensitive=False, regexp=True, additional_answers=['?\\d?'])
with self.assertRaises(Exception) as cm:
self.assert_grade(problem, "a3", "correct")
exception_message = cm.exception.message
self.assertIn("nothing to repeat", exception_message)
def test_hints(self):
hints = [
("wisconsin", "wisc", "The state capital of Wisconsin is Madison"),
("minnesota", "minn", "The state capital of Minnesota is St. Paul"),
]
problem = self.build_problem(
answer="Michigan",
case_sensitive=False,
hints=hints,
)
# We should get a hint for Wisconsin
input_dict = {'1_2_1': 'Wisconsin'}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'),
"The state capital of Wisconsin is Madison")
# We should get a hint for Minnesota
input_dict = {'1_2_1': 'Minnesota'}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'),
"The state capital of Minnesota is St. Paul")
# We should NOT get a hint for Michigan (the correct answer)
input_dict = {'1_2_1': 'Michigan'}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'), "")
# We should NOT get a hint for any other string
input_dict = {'1_2_1': 'California'}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'), "")
def test_hints_regexp_and_answer_regexp(self):
different_student_answers = [
"May be it is Boston",
"Boston, really?",
"Boston",
"OK, I see, this is Boston",
]
        # If the problem has regexp=true, it will also accept hints written as regexps
hints = [
("wisconsin", "wisc", "The state capital of Wisconsin is Madison"),
("minnesota", "minn", "The state capital of Minnesota is St. Paul"),
(".*Boston.*", "bst", "First letter of correct answer is M."),
('^\\d9$', "numbers", "Should not end with 9."),
]
additional_answers = [
'^\\d[0-8]$',
]
problem = self.build_problem(
answer="Michigan",
case_sensitive=False,
hints=hints,
additional_answers=additional_answers,
regexp=True
)
# We should get a hint for Wisconsin
input_dict = {'1_2_1': 'Wisconsin'}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'),
"The state capital of Wisconsin is Madison")
# We should get a hint for Minnesota
input_dict = {'1_2_1': 'Minnesota'}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'),
"The state capital of Minnesota is St. Paul")
# We should NOT get a hint for Michigan (the correct answer)
input_dict = {'1_2_1': 'Michigan'}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'), "")
# We should NOT get a hint for any other string
input_dict = {'1_2_1': 'California'}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'), "")
# We should get the same hint for each answer
for answer in different_student_answers:
input_dict = {'1_2_1': answer}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'), "First letter of correct answer is M.")
input_dict = {'1_2_1': '59'}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'), "Should not end with 9.")
input_dict = {'1_2_1': '57'}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'), "")
def test_computed_hints(self):
problem = self.build_problem(
answer="Michigan",
hintfn="gimme_a_hint",
script=textwrap.dedent("""
def gimme_a_hint(answer_ids, student_answers, new_cmap, old_cmap):
aid = answer_ids[0]
answer = student_answers[aid]
new_cmap.set_hint_and_mode(aid, answer+"??", "always")
""")
)
input_dict = {'1_2_1': 'Hello'}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'), "Hello??")
def test_hint_function_randomization(self):
# The hint function should get the seed from the problem.
problem = self.build_problem(
answer="1",
hintfn="gimme_a_random_hint",
script=textwrap.dedent("""
def gimme_a_random_hint(answer_ids, student_answers, new_cmap, old_cmap):
answer = {code}
new_cmap.set_hint_and_mode(answer_ids[0], answer, "always")
""".format(code=self._get_random_number_code()))
)
correct_map = problem.grade_answers({'1_2_1': '2'})
hint = correct_map.get_hint('1_2_1')
self.assertEqual(hint, self._get_random_number_result(problem.seed))
class CodeResponseTest(ResponseTest):
from capa.tests.response_xml_factory import CodeResponseXMLFactory
xml_factory_class = CodeResponseXMLFactory
def setUp(self):
super(CodeResponseTest, self).setUp()
grader_payload = json.dumps({"grader": "ps04/grade_square.py"})
self.problem = self.build_problem(initial_display="def square(x):",
answer_display="answer",
grader_payload=grader_payload,
num_responses=2)
@staticmethod
def make_queuestate(key, time):
"""Create queuestate dict"""
timestr = datetime.strftime(time, dateformat)
return {'key': key, 'time': timestr}
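    # Usage sketch (illustrative values; `dateformat` is the module-level
    # format string used by strftime above):
    #   make_queuestate(1000, datetime.now(UTC))
    #   -> {'key': 1000, 'time': <now formatted with dateformat>}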
def test_is_queued(self):
"""
Simple test of whether LoncapaProblem knows when it's been queued
"""
answer_ids = sorted(self.problem.get_question_answers())
# CodeResponse requires internal CorrectMap state. Build it now in the unqueued state
cmap = CorrectMap()
for answer_id in answer_ids:
cmap.update(CorrectMap(answer_id=answer_id, queuestate=None))
self.problem.correct_map.update(cmap)
self.assertEquals(self.problem.is_queued(), False)
# Now we queue the LCP
cmap = CorrectMap()
for i, answer_id in enumerate(answer_ids):
queuestate = CodeResponseTest.make_queuestate(i, datetime.now(UTC))
cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))
self.problem.correct_map.update(cmap)
self.assertEquals(self.problem.is_queued(), True)
def test_update_score(self):
'''
Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem
'''
answer_ids = sorted(self.problem.get_question_answers())
# CodeResponse requires internal CorrectMap state. Build it now in the queued state
old_cmap = CorrectMap()
for i, answer_id in enumerate(answer_ids):
queuekey = 1000 + i
queuestate = CodeResponseTest.make_queuestate(queuekey, datetime.now(UTC))
old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))
# Message format common to external graders
grader_msg = '<span>MESSAGE</span>' # Must be valid XML
correct_score_msg = json.dumps({'correct': True, 'score': 1, 'msg': grader_msg})
incorrect_score_msg = json.dumps({'correct': False, 'score': 0, 'msg': grader_msg})
xserver_msgs = {'correct': correct_score_msg,
'incorrect': incorrect_score_msg, }
# Incorrect queuekey, state should not be updated
for correctness in ['correct', 'incorrect']:
self.problem.correct_map = CorrectMap()
self.problem.correct_map.update(old_cmap) # Deep copy
self.problem.update_score(xserver_msgs[correctness], queuekey=0)
self.assertEquals(self.problem.correct_map.get_dict(), old_cmap.get_dict()) # Deep comparison
for answer_id in answer_ids:
                self.assertTrue(self.problem.correct_map.is_queued(answer_id))  # Should still be queued, since message undelivered
# Correct queuekey, state should be updated
for correctness in ['correct', 'incorrect']:
for i, answer_id in enumerate(answer_ids):
self.problem.correct_map = CorrectMap()
self.problem.correct_map.update(old_cmap)
new_cmap = CorrectMap()
new_cmap.update(old_cmap)
npoints = 1 if correctness == 'correct' else 0
new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None)
self.problem.update_score(xserver_msgs[correctness], queuekey=1000 + i)
self.assertEquals(self.problem.correct_map.get_dict(), new_cmap.get_dict())
for j, test_id in enumerate(answer_ids):
if j == i:
self.assertFalse(self.problem.correct_map.is_queued(test_id)) # Should be dequeued, message delivered
else:
self.assertTrue(self.problem.correct_map.is_queued(test_id)) # Should be queued, message undelivered
def test_recentmost_queuetime(self):
'''
Test whether the LoncapaProblem knows about the time of queue requests
'''
answer_ids = sorted(self.problem.get_question_answers())
# CodeResponse requires internal CorrectMap state. Build it now in the unqueued state
cmap = CorrectMap()
for answer_id in answer_ids:
cmap.update(CorrectMap(answer_id=answer_id, queuestate=None))
self.problem.correct_map.update(cmap)
self.assertEquals(self.problem.get_recentmost_queuetime(), None)
# CodeResponse requires internal CorrectMap state. Build it now in the queued state
cmap = CorrectMap()
for i, answer_id in enumerate(answer_ids):
queuekey = 1000 + i
latest_timestamp = datetime.now(UTC)
queuestate = CodeResponseTest.make_queuestate(queuekey, latest_timestamp)
cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate))
self.problem.correct_map.update(cmap)
        # Queue state only tracks the timestamp to one-second resolution
latest_timestamp = datetime.strptime(
datetime.strftime(latest_timestamp, dateformat), dateformat
).replace(tzinfo=UTC)
self.assertEquals(self.problem.get_recentmost_queuetime(), latest_timestamp)
def test_convert_files_to_filenames(self):
'''
Test whether file objects are converted to filenames without altering other structures
'''
problem_file = os.path.join(os.path.dirname(__file__), "test_files/filename_convert_test.txt")
with open(problem_file) as fp:
answers_with_file = {'1_2_1': 'String-based answer',
'1_3_1': ['answer1', 'answer2', 'answer3'],
'1_4_1': [fp, fp]}
answers_converted = convert_files_to_filenames(answers_with_file)
self.assertEquals(answers_converted['1_2_1'], 'String-based answer')
self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3'])
self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name])
def test_parse_score_msg_of_responder(self):
"""
        Test whether LoncapaProblem._parse_score_msg correctly parses valid HTML5 markup.
"""
valid_grader_msgs = [
u'<span>MESSAGE</span>', # Valid XML
textwrap.dedent("""
<div class='matlabResponse'><div id='mwAudioPlaceHolder'>
<audio controls autobuffer autoplay src='data:audio/wav;base64='>Audio is not supported on this browser.</audio>
<div>Right click <a href=https://endpoint.mss-mathworks.com/media/filename.wav>here</a> and click \"Save As\" to download the file</div></div>
<div style='white-space:pre' class='commandWindowOutput'></div><ul></ul></div>
""").replace('\n', ''), # Valid HTML5 real case Matlab response, invalid XML
'<aaa></bbb>' # Invalid XML, but will be parsed by html5lib to <aaa/>
]
invalid_grader_msgs = [
'<audio', # invalid XML and HTML5
]
answer_ids = sorted(self.problem.get_question_answers())
# CodeResponse requires internal CorrectMap state. Build it now in the queued state
old_cmap = CorrectMap()
for i, answer_id in enumerate(answer_ids):
queuekey = 1000 + i
queuestate = CodeResponseTest.make_queuestate(queuekey, datetime.now(UTC))
old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))
for grader_msg in valid_grader_msgs:
correct_score_msg = json.dumps({'correct': True, 'score': 1, 'msg': grader_msg})
incorrect_score_msg = json.dumps({'correct': False, 'score': 0, 'msg': grader_msg})
xserver_msgs = {'correct': correct_score_msg, 'incorrect': incorrect_score_msg, }
for i, answer_id in enumerate(answer_ids):
self.problem.correct_map = CorrectMap()
self.problem.correct_map.update(old_cmap)
output = self.problem.update_score(xserver_msgs['correct'], queuekey=1000 + i)
self.assertEquals(output[answer_id]['msg'], grader_msg)
for grader_msg in invalid_grader_msgs:
correct_score_msg = json.dumps({'correct': True, 'score': 1, 'msg': grader_msg})
incorrect_score_msg = json.dumps({'correct': False, 'score': 0, 'msg': grader_msg})
xserver_msgs = {'correct': correct_score_msg, 'incorrect': incorrect_score_msg, }
for i, answer_id in enumerate(answer_ids):
self.problem.correct_map = CorrectMap()
self.problem.correct_map.update(old_cmap)
output = self.problem.update_score(xserver_msgs['correct'], queuekey=1000 + i)
self.assertEquals(output[answer_id]['msg'], u'Invalid grader reply. Please contact the course staff.')
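# Reply-format sketch (field names exactly as exercised by the tests above,
# not an added spec): an external grader answers with JSON such as
#   {"correct": true, "score": 1, "msg": "<span>...</span>"}
# and update_score() routes the reply to the right input via its queuekey;
# an unparseable 'msg' is replaced with the "Invalid grader reply" text.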
class ChoiceResponseTest(ResponseTest):
from capa.tests.response_xml_factory import ChoiceResponseXMLFactory
xml_factory_class = ChoiceResponseXMLFactory
def test_radio_group_grade(self):
problem = self.build_problem(choice_type='radio',
choices=[False, True, False])
# Check that we get the expected results
self.assert_grade(problem, 'choice_0', 'incorrect')
self.assert_grade(problem, 'choice_1', 'correct')
self.assert_grade(problem, 'choice_2', 'incorrect')
# No choice 3 exists --> mark incorrect
self.assert_grade(problem, 'choice_3', 'incorrect')
def test_checkbox_group_grade(self):
problem = self.build_problem(choice_type='checkbox',
choices=[False, True, True])
# Check that we get the expected results
# (correct if and only if BOTH correct choices chosen)
self.assert_grade(problem, ['choice_1', 'choice_2'], 'correct')
self.assert_grade(problem, 'choice_1', 'incorrect')
self.assert_grade(problem, 'choice_2', 'incorrect')
self.assert_grade(problem, ['choice_0', 'choice_1'], 'incorrect')
self.assert_grade(problem, ['choice_0', 'choice_2'], 'incorrect')
# No choice 3 exists --> mark incorrect
self.assert_grade(problem, 'choice_3', 'incorrect')
class JavascriptResponseTest(ResponseTest):
from capa.tests.response_xml_factory import JavascriptResponseXMLFactory
xml_factory_class = JavascriptResponseXMLFactory
def test_grade(self):
# Compile coffee files into javascript used by the response
coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee"
os.system("node_modules/.bin/coffee -c %s" % (coffee_file_path))
capa_system = test_capa_system()
capa_system.can_execute_unsafe_code = lambda: True
problem = self.build_problem(
capa_system=capa_system,
generator_src="test_problem_generator.js",
grader_src="test_problem_grader.js",
display_class="TestProblemDisplay",
display_src="test_problem_display.js",
param_dict={'value': '4'},
)
# Test that we get graded correctly
self.assert_grade(problem, json.dumps({0: 4}), "correct")
self.assert_grade(problem, json.dumps({0: 5}), "incorrect")
def test_cant_execute_javascript(self):
# If the system says to disallow unsafe code execution, then making
# this problem will raise an exception.
capa_system = test_capa_system()
capa_system.can_execute_unsafe_code = lambda: False
with self.assertRaises(LoncapaProblemError):
self.build_problem(
capa_system=capa_system,
generator_src="test_problem_generator.js",
grader_src="test_problem_grader.js",
display_class="TestProblemDisplay",
display_src="test_problem_display.js",
param_dict={'value': '4'},
)
class NumericalResponseTest(ResponseTest):
from capa.tests.response_xml_factory import NumericalResponseXMLFactory
xml_factory_class = NumericalResponseXMLFactory
# We blend the line between integration (using evaluator) and exclusively
# unit testing the NumericalResponse (mocking out the evaluator)
    # For simple things it's not worth the effort.
def test_grade_range_tolerance(self):
problem_setup = [
            # [given_answer, [list of correct responses], [list of incorrect responses]]
['[5, 7)', ['5', '6', '6.999'], ['4.999', '7']],
['[1.6e-5, 1.9e24)', ['0.000016', '1.6*10^-5', '1.59e24'], ['1.59e-5', '1.9e24', '1.9*10^24']],
['[0, 1.6e-5]', ['1.6*10^-5'], ["2"]],
['(1.6e-5, 10]', ["2"], ['1.6*10^-5']],
]
for given_answer, correct_responses, incorrect_responses in problem_setup:
problem = self.build_problem(answer=given_answer)
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_grade_range_tolerance_exceptions(self):
# no complex number in range tolerance staff answer
problem = self.build_problem(answer='[1j, 5]')
input_dict = {'1_2_1': '3'}
with self.assertRaises(StudentInputError):
problem.grade_answers(input_dict)
        # no complex numbers in student answers to range tolerance problems
problem = self.build_problem(answer='(1, 5)')
input_dict = {'1_2_1': '1*J'}
with self.assertRaises(StudentInputError):
problem.grade_answers(input_dict)
# test isnan student input: no exception,
# but problem should be graded as incorrect
problem = self.build_problem(answer='(1, 5)')
input_dict = {'1_2_1': ''}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
self.assertEqual(correctness, 'incorrect')
# test invalid range tolerance answer
with self.assertRaises(StudentInputError):
problem = self.build_problem(answer='(1 5)')
# test empty boundaries
problem = self.build_problem(answer='(1, ]')
input_dict = {'1_2_1': '3'}
with self.assertRaises(StudentInputError):
problem.grade_answers(input_dict)
def test_grade_exact(self):
problem = self.build_problem(answer=4)
correct_responses = ["4", "4.0", "4.00"]
incorrect_responses = ["", "3.9", "4.1", "0"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_grade_decimal_tolerance(self):
problem = self.build_problem(answer=4, tolerance=0.1)
correct_responses = ["4.0", "4.00", "4.09", "3.91"]
incorrect_responses = ["", "4.11", "3.89", "0"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_grade_percent_tolerance(self):
problem = self.build_problem(answer=4, tolerance="10%")
correct_responses = ["4.0", "4.3", "3.7", "4.30", "3.70"]
incorrect_responses = ["", "4.5", "3.5", "0"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_floats(self):
"""
Default tolerance for all responsetypes is 1e-3%.
"""
problem_setup = [
# [given_answer, [list of correct responses], [list of incorrect responses]]
[1, ["1"], ["1.1"]],
[2.0, ["2.0"], ["1.0"]],
[4, ["4.0", "4.00004"], ["4.00005"]],
[0.00016, ["1.6*10^-4"], [""]],
[0.000016, ["1.6*10^-5"], ["0.000165"]],
[1.9e24, ["1.9*10^24"], ["1.9001*10^24"]],
[2e-15, ["2*10^-15"], [""]],
[3141592653589793238., ["3141592653589793115."], [""]],
[0.1234567, ["0.123456", "0.1234561"], ["0.123451"]],
[1e-5, ["1e-5", "1.0e-5"], ["-1e-5", "2*1e-5"]],
]
for given_answer, correct_responses, incorrect_responses in problem_setup:
problem = self.build_problem(answer=given_answer)
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
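    # Worked check of the 1e-3% default above (added arithmetic note): for
    # answer 4 the tolerance band is 4 * 1e-5 = 4e-5, so 4.00004 (off by
    # exactly 4e-5) is accepted while 4.00005 (off by 5e-5) is rejected,
    # matching the rows in problem_setup.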
def test_grade_with_script(self):
script_text = "computed_response = math.sqrt(4)"
problem = self.build_problem(answer="$computed_response", script=script_text)
correct_responses = ["2", "2.0"]
incorrect_responses = ["", "2.01", "1.99", "0"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_raises_zero_division_err(self):
"""See if division by zero is handled correctly."""
problem = self.build_problem(answer="1") # Answer doesn't matter
input_dict = {'1_2_1': '1/0'}
with self.assertRaises(StudentInputError):
problem.grade_answers(input_dict)
def test_staff_inputs_expressions(self):
"""Test that staff may enter in an expression as the answer."""
problem = self.build_problem(answer="1/3", tolerance=1e-3)
correct_responses = ["1/3", "0.333333"]
incorrect_responses = []
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_staff_inputs_expressions_legacy(self):
"""Test that staff may enter in a complex number as the answer."""
problem = self.build_problem(answer="1+1j", tolerance=1e-3)
self.assert_grade(problem, '1+j', 'correct')
@mock.patch('capa.responsetypes.log')
def test_staff_inputs_bad_syntax(self, mock_log):
"""Test that staff may enter in a complex number as the answer."""
staff_ans = "clearly bad syntax )[+1e"
problem = self.build_problem(answer=staff_ans, tolerance=1e-3)
msg = "There was a problem with the staff answer to this problem"
with self.assertRaisesRegexp(StudentInputError, msg):
self.assert_grade(problem, '1+j', 'correct')
mock_log.debug.assert_called_once_with(
"Content error--answer '%s' is not a valid number", staff_ans
)
@mock.patch('capa.responsetypes.log')
def test_responsetype_i18n(self, mock_log):
"""Test that LoncapaSystem has an i18n that works."""
staff_ans = "clearly bad syntax )[+1e"
problem = self.build_problem(answer=staff_ans, tolerance=1e-3)
class FakeTranslations(object):
"""A fake gettext.Translations object."""
def ugettext(self, text):
"""Return the 'translation' of `text`."""
if text == "There was a problem with the staff answer to this problem.":
text = "TRANSLATED!"
return text
problem.capa_system.i18n = FakeTranslations()
with self.assertRaisesRegexp(StudentInputError, "TRANSLATED!"):
self.assert_grade(problem, '1+j', 'correct')
def test_grade_infinity(self):
"""
Check that infinity doesn't automatically get marked correct.
This resolves a bug where a problem with relative tolerance would
pass with any arbitrarily large student answer.
"""
mapping = {
'some big input': float('inf'),
'some neg input': -float('inf'),
'weird NaN input': float('nan'),
'4': 4
}
def evaluator_side_effect(_, __, math_string):
"""Look up the given response for `math_string`."""
return mapping[math_string]
problem = self.build_problem(answer=4, tolerance='10%')
with mock.patch('capa.responsetypes.evaluator') as mock_eval:
mock_eval.side_effect = evaluator_side_effect
self.assert_grade(problem, 'some big input', 'incorrect')
self.assert_grade(problem, 'some neg input', 'incorrect')
self.assert_grade(problem, 'weird NaN input', 'incorrect')
def test_err_handling(self):
"""
See that `StudentInputError`s are raised when things go wrong.
"""
problem = self.build_problem(answer=4)
errors = [ # (exception raised, message to student)
(calc.UndefinedVariable("x"), r"You may not use variables \(x\) in numerical problems"),
(ValueError("factorial() mess-up"), "factorial function evaluated outside its domain"),
(ValueError(), "Could not interpret '.*' as a number"),
(pyparsing.ParseException("oopsie"), "Invalid math syntax"),
(ZeroDivisionError(), "Could not interpret '.*' as a number")
]
with mock.patch('capa.responsetypes.evaluator') as mock_eval:
for err, msg_regex in errors:
def evaluator_side_effect(_, __, math_string):
"""Raise an error only for the student input."""
if math_string != '4':
raise err
mock_eval.side_effect = evaluator_side_effect
with self.assertRaisesRegexp(StudentInputError, msg_regex):
problem.grade_answers({'1_2_1': 'foobar'})
def test_compare_answer(self):
"""Tests the answer compare function."""
problem = self.build_problem(answer="42")
responder = problem.responders.values()[0]
self.assertTrue(responder.compare_answer('48', '8*6'))
self.assertFalse(responder.compare_answer('48', '9*5'))
def test_validate_answer(self):
"""Tests the answer validation function."""
problem = self.build_problem(answer="42")
responder = problem.responders.values()[0]
self.assertTrue(responder.validate_answer('23.5'))
self.assertFalse(responder.validate_answer('fish'))
class CustomResponseTest(ResponseTest):
from capa.tests.response_xml_factory import CustomResponseXMLFactory
xml_factory_class = CustomResponseXMLFactory
def test_inline_code(self):
# For inline code, we directly modify global context variables
# 'answers' is a list of answers provided to us
# 'correct' is a list we fill in with True/False
# 'expect' is given to us (if provided in the XML)
inline_script = """correct[0] = 'correct' if (answers['1_2_1'] == expect) else 'incorrect'"""
problem = self.build_problem(answer=inline_script, expect="42")
# Check results
self.assert_grade(problem, '42', 'correct')
self.assert_grade(problem, '0', 'incorrect')
def test_inline_message(self):
# Inline code can update the global messages list
# to pass messages to the CorrectMap for a particular input
# The code can also set the global overall_message (str)
# to pass a message that applies to the whole response
inline_script = textwrap.dedent("""
messages[0] = "Test Message"
overall_message = "Overall message"
""")
problem = self.build_problem(answer=inline_script)
input_dict = {'1_2_1': '0'}
correctmap = problem.grade_answers(input_dict)
# Check that the message for the particular input was received
input_msg = correctmap.get_msg('1_2_1')
self.assertEqual(input_msg, "Test Message")
# Check that the overall message (for the whole response) was received
overall_msg = correctmap.get_overall_message()
self.assertEqual(overall_msg, "Overall message")
def test_inline_randomization(self):
# Make sure the seed from the problem gets fed into the script execution.
inline_script = "messages[0] = {code}".format(code=self._get_random_number_code())
problem = self.build_problem(answer=inline_script)
input_dict = {'1_2_1': '0'}
correctmap = problem.grade_answers(input_dict)
input_msg = correctmap.get_msg('1_2_1')
self.assertEqual(input_msg, self._get_random_number_result(problem.seed))
def test_function_code_single_input(self):
# For function code, we pass in these arguments:
#
# 'expect' is the expect attribute of the <customresponse>
#
# 'answer_given' is the answer the student gave (if there is just one input)
# or an ordered list of answers (if there are multiple inputs)
#
# The function should return a dict of the form
# { 'ok': BOOL, 'msg': STRING }
#
script = textwrap.dedent("""
def check_func(expect, answer_given):
return {'ok': answer_given == expect, 'msg': 'Message text'}
""")
problem = self.build_problem(script=script, cfn="check_func", expect="42")
# Correct answer
input_dict = {'1_2_1': '42'}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
msg = correct_map.get_msg('1_2_1')
self.assertEqual(correctness, 'correct')
self.assertEqual(msg, "Message text")
# Incorrect answer
input_dict = {'1_2_1': '0'}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
msg = correct_map.get_msg('1_2_1')
self.assertEqual(correctness, 'incorrect')
self.assertEqual(msg, "Message text")
def test_function_code_multiple_input_no_msg(self):
# Check functions also have the option of returning
# a single boolean value
# If true, mark all the inputs correct
# If false, mark all the inputs incorrect
script = textwrap.dedent("""
def check_func(expect, answer_given):
return (answer_given[0] == expect and
answer_given[1] == expect)
""")
problem = self.build_problem(script=script, cfn="check_func",
expect="42", num_inputs=2)
# Correct answer -- expect both inputs marked correct
input_dict = {'1_2_1': '42', '1_2_2': '42'}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
self.assertEqual(correctness, 'correct')
correctness = correct_map.get_correctness('1_2_2')
self.assertEqual(correctness, 'correct')
# One answer incorrect -- expect both inputs marked incorrect
input_dict = {'1_2_1': '0', '1_2_2': '42'}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
self.assertEqual(correctness, 'incorrect')
correctness = correct_map.get_correctness('1_2_2')
self.assertEqual(correctness, 'incorrect')
def test_function_code_multiple_inputs(self):
# If the <customresponse> has multiple inputs associated with it,
# the check function can return a dict of the form:
#
# {'overall_message': STRING,
# 'input_list': [{'ok': BOOL, 'msg': STRING}, ...] }
#
# 'overall_message' is displayed at the end of the response
#
# 'input_list' contains dictionaries representing the correctness
# and message for each input.
script = textwrap.dedent("""
def check_func(expect, answer_given):
check1 = (int(answer_given[0]) == 1)
check2 = (int(answer_given[1]) == 2)
check3 = (int(answer_given[2]) == 3)
return {'overall_message': 'Overall message',
'input_list': [
{'ok': check1, 'msg': 'Feedback 1'},
{'ok': check2, 'msg': 'Feedback 2'},
{'ok': check3, 'msg': 'Feedback 3'} ] }
""")
problem = self.build_problem(script=script,
cfn="check_func", num_inputs=3)
# Grade the inputs (one input incorrect)
input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'}
correct_map = problem.grade_answers(input_dict)
# Expect that we receive the overall message (for the whole response)
self.assertEqual(correct_map.get_overall_message(), "Overall message")
# Expect that the inputs were graded individually
self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect')
self.assertEqual(correct_map.get_correctness('1_2_2'), 'correct')
self.assertEqual(correct_map.get_correctness('1_2_3'), 'correct')
# Expect that we received messages for each individual input
self.assertEqual(correct_map.get_msg('1_2_1'), 'Feedback 1')
self.assertEqual(correct_map.get_msg('1_2_2'), 'Feedback 2')
self.assertEqual(correct_map.get_msg('1_2_3'), 'Feedback 3')
def test_function_code_with_extra_args(self):
script = textwrap.dedent("""\
def check_func(expect, answer_given, options, dynamath):
assert options == "xyzzy", "Options was %r" % options
return {'ok': answer_given == expect, 'msg': 'Message text'}
""")
problem = self.build_problem(script=script, cfn="check_func", expect="42", options="xyzzy", cfn_extra_args="options dynamath")
# Correct answer
input_dict = {'1_2_1': '42'}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
msg = correct_map.get_msg('1_2_1')
self.assertEqual(correctness, 'correct')
self.assertEqual(msg, "Message text")
# Incorrect answer
input_dict = {'1_2_1': '0'}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
msg = correct_map.get_msg('1_2_1')
self.assertEqual(correctness, 'incorrect')
self.assertEqual(msg, "Message text")
def test_multiple_inputs_return_one_status(self):
# When given multiple inputs, the 'answer_given' argument
# to the check_func() is a list of inputs
#
# The sample script below marks the problem as correct
# if and only if it receives answer_given=[1,2,3]
# (or string values ['1','2','3'])
#
# Since we return a dict describing the status of one input,
# we expect that the same 'ok' value is applied to each
# of the inputs.
script = textwrap.dedent("""
def check_func(expect, answer_given):
check1 = (int(answer_given[0]) == 1)
check2 = (int(answer_given[1]) == 2)
check3 = (int(answer_given[2]) == 3)
return {'ok': (check1 and check2 and check3),
'msg': 'Message text'}
""")
problem = self.build_problem(script=script,
cfn="check_func", num_inputs=3)
# Grade the inputs (one input incorrect)
input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'}
correct_map = problem.grade_answers(input_dict)
# Everything marked incorrect
self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect')
self.assertEqual(correct_map.get_correctness('1_2_2'), 'incorrect')
self.assertEqual(correct_map.get_correctness('1_2_3'), 'incorrect')
# Grade the inputs (everything correct)
input_dict = {'1_2_1': '1', '1_2_2': '2', '1_2_3': '3'}
correct_map = problem.grade_answers(input_dict)
        # Everything marked correct
self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct')
self.assertEqual(correct_map.get_correctness('1_2_2'), 'correct')
self.assertEqual(correct_map.get_correctness('1_2_3'), 'correct')
# Message is interpreted as an "overall message"
self.assertEqual(correct_map.get_overall_message(), 'Message text')
def test_script_exception_function(self):
# Construct a script that will raise an exception
script = textwrap.dedent("""
def check_func(expect, answer_given):
raise Exception("Test")
""")
problem = self.build_problem(script=script, cfn="check_func")
# Expect that an exception gets raised when we check the answer
with self.assertRaises(ResponseError):
problem.grade_answers({'1_2_1': '42'})
def test_script_exception_inline(self):
# Construct a script that will raise an exception
script = 'raise Exception("Test")'
problem = self.build_problem(answer=script)
# Expect that an exception gets raised when we check the answer
with self.assertRaises(ResponseError):
problem.grade_answers({'1_2_1': '42'})
def test_invalid_dict_exception(self):
# Construct a script that passes back an invalid dict format
script = textwrap.dedent("""
def check_func(expect, answer_given):
return {'invalid': 'test'}
""")
problem = self.build_problem(script=script, cfn="check_func")
# Expect that an exception gets raised when we check the answer
with self.assertRaises(ResponseError):
problem.grade_answers({'1_2_1': '42'})
def test_setup_randomization(self):
# Ensure that the problem setup script gets the random seed from the problem.
script = textwrap.dedent("""
num = {code}
""".format(code=self._get_random_number_code()))
problem = self.build_problem(script=script)
self.assertEqual(problem.context['num'], self._get_random_number_result(problem.seed))
def test_check_function_randomization(self):
# The check function should get random-seeded from the problem.
script = textwrap.dedent("""
def check_func(expect, answer_given):
return {{'ok': True, 'msg': {code} }}
""".format(code=self._get_random_number_code()))
problem = self.build_problem(script=script, cfn="check_func", expect="42")
input_dict = {'1_2_1': '42'}
correct_map = problem.grade_answers(input_dict)
msg = correct_map.get_msg('1_2_1')
self.assertEqual(msg, self._get_random_number_result(problem.seed))
def test_random_isnt_none(self):
# Bug LMS-500 says random.seed(10) fails with:
# File "<string>", line 61, in <module>
# File "/usr/lib/python2.7/random.py", line 116, in seed
# super(Random, self).seed(a)
# TypeError: must be type, not None
r = random.Random()
r.seed(10)
num = r.randint(0, 1e9)
script = textwrap.dedent("""
random.seed(10)
num = random.randint(0, 1e9)
""")
problem = self.build_problem(script=script)
self.assertEqual(problem.context['num'], num)
def test_module_imports_inline(self):
'''
Check that the correct modules are available to custom
response scripts
'''
for module_name in ['random', 'numpy', 'math', 'scipy',
'calc', 'eia', 'chemcalc', 'chemtools',
'miller', 'draganddrop']:
# Create a script that checks that the name is defined
# If the name is not defined, then the script
# will raise an exception
script = textwrap.dedent('''
correct[0] = 'correct'
assert('%s' in globals())''' % module_name)
# Create the problem
problem = self.build_problem(answer=script)
# Expect that we can grade an answer without
# getting an exception
try:
problem.grade_answers({'1_2_1': '42'})
except ResponseError:
self.fail("Could not use name '{0}s' in custom response".format(module_name))
def test_module_imports_function(self):
'''
Check that the correct modules are available to custom
response scripts
'''
for module_name in ['random', 'numpy', 'math', 'scipy',
'calc', 'eia', 'chemcalc', 'chemtools',
'miller', 'draganddrop']:
# Create a script that checks that the name is defined
# If the name is not defined, then the script
# will raise an exception
script = textwrap.dedent('''
def check_func(expect, answer_given):
assert('%s' in globals())
return True''' % module_name)
# Create the problem
problem = self.build_problem(script=script, cfn="check_func")
# Expect that we can grade an answer without
# getting an exception
try:
problem.grade_answers({'1_2_1': '42'})
except ResponseError:
self.fail("Could not use name '{0}s' in custom response".format(module_name))
class SchematicResponseTest(ResponseTest):
from capa.tests.response_xml_factory import SchematicResponseXMLFactory
xml_factory_class = SchematicResponseXMLFactory
def test_grade(self):
# Most of the schematic-specific work is handled elsewhere
# (in client-side JavaScript)
# The <schematicresponse> is responsible only for executing the
# Python code in <answer> with *submission* (list)
# in the global context.
# To test that the context is set up correctly,
# we create a script that sets *correct* to true
# if and only if we find the *submission* (list)
script = "correct = ['correct' if 'test' in submission[0] else 'incorrect']"
problem = self.build_problem(answer=script)
# The actual dictionary would contain schematic information
# sent from the JavaScript simulation
submission_dict = {'test': 'the_answer'}
input_dict = {'1_2_1': json.dumps(submission_dict)}
correct_map = problem.grade_answers(input_dict)
        # Expect that the problem is graded as correct
        # (that is, our script verifies that the context
        # is what we expect)
self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct')
def test_check_function_randomization(self):
# The check function should get a random seed from the problem.
script = "correct = ['correct' if (submission[0]['num'] == {code}) else 'incorrect']".format(code=self._get_random_number_code())
problem = self.build_problem(answer=script)
submission_dict = {'num': self._get_random_number_result(problem.seed)}
input_dict = {'1_2_1': json.dumps(submission_dict)}
correct_map = problem.grade_answers(input_dict)
self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct')
def test_script_exception(self):
# Construct a script that will raise an exception
script = "raise Exception('test')"
problem = self.build_problem(answer=script)
# Expect that an exception gets raised when we check the answer
with self.assertRaises(ResponseError):
submission_dict = {'test': 'test'}
input_dict = {'1_2_1': json.dumps(submission_dict)}
problem.grade_answers(input_dict)
class AnnotationResponseTest(ResponseTest):
from capa.tests.response_xml_factory import AnnotationResponseXMLFactory
xml_factory_class = AnnotationResponseXMLFactory
def test_grade(self):
(correct, partially, incorrect) = ('correct', 'partially-correct', 'incorrect')
answer_id = '1_2_1'
options = (('x', correct), ('y', partially), ('z', incorrect))
make_answer = lambda option_ids: {answer_id: json.dumps({'options': option_ids})}
tests = [
{'correctness': correct, 'points': 2, 'answers': make_answer([0])},
{'correctness': partially, 'points': 1, 'answers': make_answer([1])},
{'correctness': incorrect, 'points': 0, 'answers': make_answer([2])},
{'correctness': incorrect, 'points': 0, 'answers': make_answer([0, 1, 2])},
{'correctness': incorrect, 'points': 0, 'answers': make_answer([])},
{'correctness': incorrect, 'points': 0, 'answers': make_answer('')},
{'correctness': incorrect, 'points': 0, 'answers': make_answer(None)},
{'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null'}},
]
for test in tests:
expected_correctness = test['correctness']
expected_points = test['points']
answers = test['answers']
problem = self.build_problem(options=options)
correct_map = problem.grade_answers(answers)
actual_correctness = correct_map.get_correctness(answer_id)
actual_points = correct_map.get_npoints(answer_id)
self.assertEqual(expected_correctness, actual_correctness,
msg="%s should be marked %s" % (answer_id, expected_correctness))
self.assertEqual(expected_points, actual_points,
msg="%s should have %d points" % (answer_id, expected_points))
class ChoiceTextResponseTest(ResponseTest):
"""
Class containing setup and tests for ChoiceText responsetype.
"""
    from capa.tests.response_xml_factory import ChoiceTextResponseXMLFactory
xml_factory_class = ChoiceTextResponseXMLFactory
# `TEST_INPUTS` is a dictionary mapping from
# test_name to a representation of inputs for a test problem.
TEST_INPUTS = {
"1_choice_0_input_correct": [(True, [])],
"1_choice_0_input_incorrect": [(False, [])],
"1_choice_0_input_invalid_choice": [(False, []), (True, [])],
"1_choice_1_input_correct": [(True, ["123"])],
"1_input_script_correct": [(True, ["2"])],
"1_input_script_incorrect": [(True, ["3.25"])],
"1_choice_2_inputs_correct": [(True, ["123", "456"])],
"1_choice_2_inputs_tolerance": [(True, ["123 + .5", "456 + 9"])],
"1_choice_2_inputs_1_wrong": [(True, ["0", "456"])],
"1_choice_2_inputs_both_wrong": [(True, ["0", "0"])],
"1_choice_2_inputs_inputs_blank": [(True, ["", ""])],
"1_choice_2_inputs_empty": [(False, [])],
"1_choice_2_inputs_fail_tolerance": [(True, ["123 + 1.5", "456 + 9"])],
"1_choice_1_input_within_tolerance": [(True, ["122.5"])],
"1_choice_1_input_answer_incorrect": [(True, ["345"])],
"1_choice_1_input_choice_incorrect": [(False, ["123"])],
"2_choices_0_inputs_correct": [(False, []), (True, [])],
"2_choices_0_inputs_incorrect": [(True, []), (False, [])],
"2_choices_0_inputs_blank": [(False, []), (False, [])],
"2_choices_1_input_1_correct": [(False, []), (True, ["123"])],
"2_choices_1_input_1_incorrect": [(True, []), (False, ["123"])],
"2_choices_1_input_input_wrong": [(False, []), (True, ["321"])],
"2_choices_1_input_1_blank": [(False, []), (False, [])],
"2_choices_1_input_2_correct": [(True, []), (False, ["123"])],
"2_choices_1_input_2_incorrect": [(False, []), (True, ["123"])],
"2_choices_2_inputs_correct": [(True, ["123"]), (False, [])],
"2_choices_2_inputs_wrong_choice": [(False, ["123"]), (True, [])],
"2_choices_2_inputs_wrong_input": [(True, ["321"]), (False, [])]
}
    # `TEST_SCENARIOS` is a dictionary of the form
    # {test_name: (test_problem_name, correctness)}
    # correctness represents whether the problem should be graded as
    # correct or incorrect when the test is run.
TEST_SCENARIOS = {
"1_choice_0_input_correct": ("1_choice_0_input", "correct"),
"1_choice_0_input_incorrect": ("1_choice_0_input", "incorrect"),
"1_choice_0_input_invalid_choice": ("1_choice_0_input", "incorrect"),
"1_input_script_correct": ("1_input_script", "correct"),
"1_input_script_incorrect": ("1_input_script", "incorrect"),
"1_choice_2_inputs_correct": ("1_choice_2_inputs", "correct"),
"1_choice_2_inputs_tolerance": ("1_choice_2_inputs", "correct"),
"1_choice_2_inputs_1_wrong": ("1_choice_2_inputs", "incorrect"),
"1_choice_2_inputs_both_wrong": ("1_choice_2_inputs", "incorrect"),
"1_choice_2_inputs_inputs_blank": ("1_choice_2_inputs", "incorrect"),
"1_choice_2_inputs_empty": ("1_choice_2_inputs", "incorrect"),
"1_choice_2_inputs_fail_tolerance": ("1_choice_2_inputs", "incorrect"),
"1_choice_1_input_correct": ("1_choice_1_input", "correct"),
"1_choice_1_input_within_tolerance": ("1_choice_1_input", "correct"),
"1_choice_1_input_answer_incorrect": ("1_choice_1_input", "incorrect"),
"1_choice_1_input_choice_incorrect": ("1_choice_1_input", "incorrect"),
"2_choices_0_inputs_correct": ("2_choices_0_inputs", "correct"),
"2_choices_0_inputs_incorrect": ("2_choices_0_inputs", "incorrect"),
"2_choices_0_inputs_blank": ("2_choices_0_inputs", "incorrect"),
"2_choices_1_input_1_correct": ("2_choices_1_input_1", "correct"),
"2_choices_1_input_1_incorrect": ("2_choices_1_input_1", "incorrect"),
"2_choices_1_input_input_wrong": ("2_choices_1_input_1", "incorrect"),
"2_choices_1_input_1_blank": ("2_choices_1_input_1", "incorrect"),
"2_choices_1_input_2_correct": ("2_choices_1_input_2", "correct"),
"2_choices_1_input_2_incorrect": ("2_choices_1_input_2", "incorrect"),
"2_choices_2_inputs_correct": ("2_choices_2_inputs", "correct"),
"2_choices_2_inputs_wrong_choice": ("2_choices_2_inputs", "incorrect"),
"2_choices_2_inputs_wrong_input": ("2_choices_2_inputs", "incorrect")
}
# Dictionary that maps from problem_name to arguments for
# _make_problem, that will create the problem.
TEST_PROBLEM_ARGS = {
"1_choice_0_input": {"choices": ("true", {}), "script": ''},
"1_choice_1_input": {
"choices": ("true", {"answer": "123", "tolerance": "1"}),
"script": ''
},
"1_input_script": {
"choices": ("true", {"answer": "$computed_response", "tolerance": "1"}),
"script": "computed_response = math.sqrt(4)"
},
"1_choice_2_inputs": {
"choices": [
(
"true", (
{"answer": "123", "tolerance": "1"},
{"answer": "456", "tolerance": "10"}
)
)
],
"script": ''
},
"2_choices_0_inputs": {
"choices": [("false", {}), ("true", {})],
"script": ''
},
"2_choices_1_input_1": {
"choices": [
("false", {}), ("true", {"answer": "123", "tolerance": "0"})
],
"script": ''
},
"2_choices_1_input_2": {
"choices": [("true", {}), ("false", {"answer": "123", "tolerance": "0"})],
"script": ''
},
"2_choices_2_inputs": {
"choices": [
("true", {"answer": "123", "tolerance": "0"}),
("false", {"answer": "999", "tolerance": "0"})
],
"script": ''
}
}
def _make_problem(self, choices, in_type='radiotextgroup', script=''):
"""
Convenience method to fill in default values for script and
type if needed, then call self.build_problem
"""
return self.build_problem(
choices=choices,
type=in_type,
script=script
)
def _make_answer_dict(self, choice_list):
"""
Convenience method to make generation of answers less tedious,
pass in an iterable argument with elements of the form: [bool, [ans,]]
Will generate an answer dict for those options
"""
answer_dict = {}
for index, choice_answers_pair in enumerate(choice_list):
            # Choice is whether this choice is correct
            # Answers contains a list of answers to the text inputs for the choice
choice, answers = choice_answers_pair
if choice:
# Radio/Checkbox inputs in choicetext problems follow
# a naming convention that gives them names ending with "bc"
choice_id = "1_2_1_choiceinput_{index}bc".format(index=index)
choice_value = "choiceinput_{index}".format(index=index)
answer_dict[choice_id] = choice_value
# Build the names for the numtolerance_inputs and add their answers
# to `answer_dict`.
for ind, answer in enumerate(answers):
# In `answer_id` `index` represents the ordinality of the
# choice and `ind` represents the ordinality of the
# numtolerance_input inside the parent choice.
answer_id = "1_2_1_choiceinput_{index}_numtolerance_input_{ind}".format(
index=index,
ind=ind
)
answer_dict[answer_id] = answer
return answer_dict
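    # Output sketch (derived from the naming convention documented above):
    #   _make_answer_dict([(True, ["5"])]) yields
    #   {"1_2_1_choiceinput_0bc": "choiceinput_0",
    #    "1_2_1_choiceinput_0_numtolerance_input_0": "5"}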
def test_invalid_xml(self):
"""
Test that build problem raises errors for invalid options
"""
with self.assertRaises(Exception):
self.build_problem(type="invalidtextgroup")
def test_valid_xml(self):
"""
Test that `build_problem` builds valid xml
"""
self.build_problem()
self.assertTrue(True)
def test_unchecked_input_not_validated(self):
"""
Test that a student can have a non numeric answer in an unselected
choice without causing an error to be raised when the problem is
checked.
"""
two_choice_two_input = self._make_problem(
[
("true", {"answer": "123", "tolerance": "1"}),
("false", {})
],
"checkboxtextgroup"
)
self.assert_grade(
two_choice_two_input,
self._make_answer_dict([(True, ["1"]), (False, ["Platypus"])]),
"incorrect"
)
def test_interpret_error(self):
"""
        Test that student answers that cannot be interpreted as numbers
cause the response type to raise an error.
"""
two_choice_two_input = self._make_problem(
[
("true", {"answer": "123", "tolerance": "1"}),
("false", {})
],
"checkboxtextgroup"
)
with self.assertRaisesRegexp(StudentInputError, "Could not interpret"):
# Test that error is raised for input in selected correct choice.
self.assert_grade(
two_choice_two_input,
self._make_answer_dict([(True, ["Platypus"])]),
"correct"
)
with self.assertRaisesRegexp(StudentInputError, "Could not interpret"):
# Test that error is raised for input in selected incorrect choice.
self.assert_grade(
two_choice_two_input,
self._make_answer_dict([(True, ["1"]), (True, ["Platypus"])]),
"correct"
)
def test_staff_answer_error(self):
broken_problem = self._make_problem(
[("true", {"answer": "Platypus", "tolerance": "0"}),
("true", {"answer": "edX", "tolerance": "0"})
],
"checkboxtextgroup"
)
with self.assertRaisesRegexp(
StudentInputError,
"The Staff answer could not be interpreted as a number."
):
self.assert_grade(
broken_problem,
self._make_answer_dict(
[(True, ["1"]), (True, ["1"])]
),
"correct"
)
def test_radio_grades(self):
"""
Test that confirms correct operation of grading when the inputtag is
radiotextgroup.
"""
for name, inputs in self.TEST_INPUTS.iteritems():
# Turn submission into the form expected when grading this problem.
submission = self._make_answer_dict(inputs)
# Lookup the problem_name, and the whether this test problem
# and inputs should be graded as correct or incorrect.
problem_name, correctness = self.TEST_SCENARIOS[name]
# Load the args needed to build the problem for this test.
problem_args = self.TEST_PROBLEM_ARGS[problem_name]
test_choices = problem_args["choices"]
test_script = problem_args["script"]
# Build the actual problem for the test.
test_problem = self._make_problem(test_choices, 'radiotextgroup', test_script)
# Make sure the actual grade matches the expected grade.
self.assert_grade(
test_problem,
submission,
correctness,
msg="{0} should be {1}".format(
name,
correctness
)
)
def test_checkbox_grades(self):
"""
Test that confirms correct operation of grading when the inputtag is
checkboxtextgroup.
"""
# Dictionary from name of test_scenario to (problem_name, correctness)
# Correctness is used to test whether the problem was graded properly
scenarios = {
"2_choices_correct": ("checkbox_two_choices", "correct"),
"2_choices_incorrect": ("checkbox_two_choices", "incorrect"),
"2_choices_2_inputs_correct": (
"checkbox_2_choices_2_inputs",
"correct"
),
"2_choices_2_inputs_missing_choice": (
"checkbox_2_choices_2_inputs",
"incorrect"
),
"2_choices_2_inputs_wrong_input": (
"checkbox_2_choices_2_inputs",
"incorrect"
)
}
# Dictionary scenario_name: test_inputs
inputs = {
"2_choices_correct": [(True, []), (True, [])],
"2_choices_incorrect": [(True, []), (False, [])],
"2_choices_2_inputs_correct": [(True, ["123"]), (True, ["456"])],
"2_choices_2_inputs_missing_choice": [
(True, ["123"]), (False, ["456"])
],
"2_choices_2_inputs_wrong_input": [
(True, ["123"]), (True, ["654"])
]
}
# Two choice zero input problem with both choices being correct.
checkbox_two_choices = self._make_problem(
[("true", {}), ("true", {})], "checkboxtextgroup"
)
# Two choice two input problem with both choices correct.
checkbox_two_choices_two_inputs = self._make_problem(
[("true", {"answer": "123", "tolerance": "0"}),
("true", {"answer": "456", "tolerance": "0"})
],
"checkboxtextgroup"
)
# Dictionary problem_name: problem
problems = {
"checkbox_two_choices": checkbox_two_choices,
"checkbox_2_choices_2_inputs": checkbox_two_choices_two_inputs
}
        for name, test_inputs in inputs.iteritems():
            submission = self._make_answer_dict(test_inputs)
# Load the test problem's name and desired correctness
problem_name, correctness = scenarios[name]
# Load the problem
problem = problems[problem_name]
# Make sure the actual grade matches the expected grade
self.assert_grade(
problem,
submission,
correctness,
msg="{0} should be {1}".format(name, correctness)
)
| XiaodunServerGroup/ddyedx | common/lib/capa/capa/tests/test_responsetypes.py | Python | agpl-3.0 | 92,013 |
import types
from StringIO import StringIO
from validator.contextgenerator import ContextGenerator
class PropertiesParser(object):
"""
Parses and serializes .properties files. Even though you can pretty
much do this in your sleep, it's still useful for L10n tests.
"""
def __init__(self, dtd):
"""
Properties parsers can initialized based on a file path
(provided as a string to the path), or directly (in memory as a
StringIO object).
"""
self.entities = {}
self.items = []
        if isinstance(dtd, types.StringTypes):
            data = open(dtd).read()
        elif isinstance(dtd, StringIO):
            data = dtd.getvalue()
        elif isinstance(dtd, file):
            data = dtd.read()
        else:
            # Guard so an unsupported type fails loudly here rather than
            # with a confusing NameError on `data` below.
            raise TypeError("Expected a file path, StringIO, or file object")
# Create a context!
self.context = ContextGenerator(data)
split_data = data.split("\n")
line_buffer = None
line_number = 0
for line in split_data:
# Increment the line number
line_number += 1
# Clean things up
clean_line = line.strip()
if not clean_line:
continue
if clean_line.startswith("#"):
continue
# It's a line that wraps
if clean_line.count("=") == 0:
if line_buffer:
line_buffer[-1] += clean_line
else:
continue
else:
if line_buffer:
# This line terminates a wrapped line
self.entities[line_buffer[0].strip()] = \
line_buffer[1].strip()
self.items.append((line_buffer[0].strip(),
line_buffer[1].strip(),
line_number))
line_buffer = clean_line.split("=", 1)
# Handle any left-over wrapped line data
if line_buffer:
self.entities[line_buffer[0].strip()] = \
line_buffer[1].strip()
self.items.append((line_buffer[0].strip(),
line_buffer[1].strip(),
line_number))
def __len__(self):
return len(self.entities)
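# Minimal usage sketch (hypothetical input, not from the original test suite):
#
#   from StringIO import StringIO
#   parser = PropertiesParser(StringIO("greeting=Hello\n# a comment\n"))
#   assert parser.entities["greeting"] == "Hello"
#   assert len(parser) == 1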
| mattbasta/amo-validator | validator/testcases/l10n/properties.py | Python | bsd-3-clause | 2,287 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import calculus
import numpy as np
import system_constraints as cnstr
import system_loglikelihood as logl
class direction:
def __init__(self,panel):
self.gradient=calculus.gradient(panel)
self.hessian=calculus.hessian(panel,self.gradient)
self.panel=panel
self.constr=None
self.hessian_num=None
self.g_old=None
self.do_shocks=True
self.old_dx_conv=None
self.I=np.diag(np.ones(panel.args.n_args))
def get(self,ll,args,dx_norm,its,mp,dxi,numerical,precise_hessian):
if its==0:
ll=self.init_ll(args)
if ll.LL is None:
raise RuntimeError("Error in LL calculation: %s" %(ll.err_msg,))
self.constr=cnstr.constraints(self.panel,ll.args_v)
cnstr.add_static_constraints(self.constr,self.panel,ll,its)
g,G=self.get_gradient(ll)
hessian=self.get_hessian(ll,mp,g,G,dxi,its,dx_norm,numerical,precise_hessian)
cnstr.add_constraints(G,self.panel,ll,self.constr,dx_norm,self.old_dx_conv,hessian,its)
self.old_dx_conv=dx_norm
dc=solve(self.constr,hessian, g, ll.args_v)
include=np.ones(len(g))
for j in range(len(dc)):
s=dc*g*include
if np.sum(s)<0:#negative slope
s=np.argsort(s)
k=s[0]
self.constr.add(k, None, 'neg. slope')
include[k]=False
dc=solve(self.constr,hessian, g, ll.args_v)
else:
break
return dc,g,G,hessian,self.constr,ll
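	# Annotation (added note): the loop above enforces an ascent direction --
	# when the slope dc*g is negative, the coordinate with the most negative
	# contribution is frozen via a 'neg. slope' constraint and the step is
	# re-solved until the slope is non-negative.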
def get_gradient(self,ll):
DLL_e=-(ll.e_RE*ll.v_inv)*self.panel.included
dLL_lnv=-0.5*(self.panel.included-(ll.e_REsq*ll.v_inv)*self.panel.included)
g,G=self.gradient.get(ll,DLL_e,dLL_lnv,return_G=True)
return g,G
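	# Annotation (my reading of the code, not an original note): these are the
	# analytic first derivatives of a normal log-likelihood with residual e and
	# variance v: dLL/de = -e/v and dLL/dln(v) = -0.5*(1 - e**2/v), masked by
	# panel.included.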
def get_hessian(self,ll,mp,g,G,dxi,its,dx_norm,numerical,precise):
hessian=None
I=self.I
d2LL_de2=-ll.v_inv*self.panel.included
d2LL_dln_de=ll.e_RE*ll.v_inv*self.panel.included
d2LL_dln2=-0.5*ll.e_REsq*ll.v_inv*self.panel.included
if not numerical or self.hessian_num is None:
hessian=self.hessian.get(ll,mp,d2LL_de2,d2LL_dln_de,d2LL_dln2)
else:
			hessian=self.nummerical_hessian(dxi,g)
return hessian
self.hessian_num=hessian
self.g_old=g
if precise:
return hessian
if dx_norm is None:
m=10
else:
m=max(dx_norm)**2
hessian=(hessian+m*I*hessian)/(1+m)
return hessian
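	# Annotation (added note): the blend (H + m*I*H)/(1+m) damps the Hessian
	# toward its diagonal (elementwise I*H keeps only the diagonal entries),
	# damping harder the larger the squared step norm m -- a
	# Levenberg-Marquardt-style safeguard when the last iteration moved far.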
def nummerical_hessian(self,dxi,g):
I=self.I
if (self.g_old is None) or (dxi is None):
return I
#print("Using numerical hessian")
hessin_num=hessin(self.hessian_num)
if hessin_num is None:
return I
hessin_num=nummerical_hessin(g,self.g_old,hessin_num,dxi)
hessian=hessin(hessin_num)
if hessian is None:
return I
return hessian
def init_ll(self,args):
self.constr=cnstr.constraints(self.panel,args)
cnstr.add_static_constraints(self.constr,self.panel,None,0)
ll=logl.LL(args, self.panel, constraints=self.constr)
if ll.LL is None:
print("""You requested stored arguments from a previous session
to be used as initial arguments (loadargs=True) but these failed to
return a valid log likelihood with the new parameters. Default inital
arguments will be used. """)
ll=logl.LL(self.panel.args.args_init,self.panel,constraints=self.constr)
return ll
def hessin(hessian):
try:
h=-np.linalg.inv(hessian)
	except np.linalg.LinAlgError:
return None
return h
def nummerical_hessin(g,g_old,hessin,dxi):
if dxi is None:
return None
dg=g-g_old #Compute difference of gradients,
#and difference times current matrix:
n=len(g)
hdg=(np.dot(hessin,dg.reshape(n,1))).flatten()
fac=fae=sumdg=sumxi=0.0 #Calculate dot products for the denominators.
fac = np.sum(dg*dxi)
fae = np.sum(dg*hdg)
sumdg = np.sum(dg*dg)
sumxi = np.sum(dxi*dxi)
if (fac > (3.0e-16*sumdg*sumxi)**0.5):#Skip update if fac not sufficiently positive.
fac=1.0/fac
fad=1.0/fae
#The vector that makes BFGS different from DFP:
dg=fac*dxi-fad*hdg
#The BFGS updating formula:
hessin+=fac*dxi.reshape(n,1)*dxi.reshape(1,n)
hessin-=fad*hdg.reshape(n,1)*hdg.reshape(1,n)
hessin+=fae*dg.reshape(n,1)*dg.reshape(1,n)
return hessin
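# Hedged sketch (added for illustration; not part of the original module):
# one BFGS update as implemented above satisfies the secant condition
# h_new.dot(dg) == dxi exactly on a quadratic objective.
def _bfgs_secant_demo():
	A=np.diag([2.0, 5.0])               #Hessian of f(x)=0.5*x'Ax
	x0,x1=np.zeros(2),np.ones(2)
	g0,g1=A.dot(x0),A.dot(x1)
	h_new=nummerical_hessin(g1, g0, np.eye(2), x1-x0)
	assert np.allclose(h_new.dot(g1-g0), x1-x0)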
def solve(constr,H, g, x):
"""Solves a second degree taylor expansion for the dc for df/dc=0 if f is quadratic, given gradient
g, hessian H, inequalty constraints c and equalitiy constraints c_eq and returns the solution and
and index constrained indicating the constrained variables"""
	if H is None:
		return g*0
n=len(H)
k=len(constr.constraints)
H=np.concatenate((H,np.zeros((n,k))),1)
H=np.concatenate((H,np.zeros((k,n+k))),0)
g=np.append(g,(k)*[0])
for i in range(k):
H[n+i,n+i]=1
j=0
xi=np.zeros(len(g))
for i in constr.fixed:
kuhn_tucker(constr.fixed[i],i,j,n, H, g, x,xi, recalc=False)
j+=1
xi=-np.linalg.solve(H,g).flatten()
OK=False
w=0
for r in range(50):
j2=j
for i in constr.intervals:
xi=kuhn_tucker(constr.intervals[i],i,j2,n, H, g, x,xi)
j2+=1
OK=constr.within(x+xi[:n],False)
if OK:
break
if r==k+3:
#print('Unable to set constraints in direction calculation')
break
return xi[:n]
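# Hedged illustration (added): the bordering trick that kuhn_tucker() below
# relies on. Activating a constraint appends a unit row/column to H and the
# residual to g; the bordered solve then steps exactly onto the bound.
def _bordering_demo():
	H=np.array([[2.0, 0.0, 1.0],
	            [0.0, 2.0, 0.0],
	            [1.0, 0.0, 0.0]])    #2x2 Hessian bordered to fix x0
	g=np.array([1.0, 1.0, -0.5])     #g[2]=-(target-x0), as in kuhn_tucker
	xi=-np.linalg.solve(H, g)
	assert abs(xi[0]-0.5)<1e-12      #x0 moves by exactly 0.5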
def kuhn_tucker(c,i,j,n,H,g,x,xi,recalc=True):
q=None
	if c.value is not None:
q=-(c.value-x[i])
elif x[i]+xi[i]<c.min:
q=-(c.min-x[i])
elif x[i]+xi[i]>c.max:
q=-(c.max-x[i])
	if q is not None:
H[i,n+j]=1
H[n+j,i]=1
H[n+j,n+j]=0
g[n+j]=q
if recalc:
xi=-np.linalg.solve(H,g).flatten()
return xi | espensirnes/paneltime | paneltime/system/system_direction.py | Python | gpl-3.0 | 5,279 |
from condottieri_notification.models import Notice
def notification(request):
if request.user.is_authenticated:
return {
"notice_unseen_count": Notice.objects.unseen_count_for(request.user)
}
else:
return {}
| jantoniomartin/condottieri_notification | context_processors.py | Python | agpl-3.0 | 253 |
from flask import request, session, redirect, url_for, current_app
from .base import authlib_oauth_client
from ..models.setting import Setting
def oidc_oauth():
if not Setting().get('oidc_oauth_enabled'):
return None
def fetch_oidc_token():
return session.get('oidc_token')
def update_token(token):
session['oidc_token'] = token
return token
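    # authlib calls these two hooks to read the session-stored token on each
    # request and to persist refreshed tokens transparently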
oidc = authlib_oauth_client.register(
'oidc',
client_id=Setting().get('oidc_oauth_key'),
client_secret=Setting().get('oidc_oauth_secret'),
api_base_url=Setting().get('oidc_oauth_api_url'),
request_token_url=None,
access_token_url=Setting().get('oidc_oauth_token_url'),
authorize_url=Setting().get('oidc_oauth_authorize_url'),
client_kwargs={'scope': Setting().get('oidc_oauth_scope')},
fetch_token=fetch_oidc_token,
update_token=update_token)
@current_app.route('/oidc/authorized')
def oidc_authorized():
session['oidc_oauthredir'] = url_for('.oidc_authorized',
_external=True)
token = oidc.authorize_access_token()
if token is None:
return 'Access denied: reason=%s error=%s' % (
request.args['error'], request.args['error_description'])
        session['oidc_token'] = token
return redirect(url_for('index.login'))
return oidc | ngoduykhanh/PowerDNS-Admin | powerdnsadmin/services/oidc.py | Python | mit | 1,425 |
"""
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from tests.helpers import http_client
def test_view_profile_of_existing_user(site_app, site, user):
response = request_profile(site_app, user.id)
assert response.status_code == 200
assert response.mimetype == 'text/html'
def test_view_profile_of_uninitialized_user(
site_app, site, uninitialized_user
):
response = request_profile(site_app, uninitialized_user.id)
assert response.status_code == 404
def test_view_profile_of_suspended_user(site_app, site, suspended_user):
response = request_profile(site_app, suspended_user.id)
assert response.status_code == 404
def test_view_profile_of_deleted_user(site_app, site, deleted_user):
response = request_profile(site_app, deleted_user.id)
assert response.status_code == 404
def test_view_profile_of_unknown_user(site_app, site):
unknown_user_id = '00000000-0000-0000-0000-000000000000'
response = request_profile(site_app, unknown_user_id)
assert response.status_code == 404
# helpers
def request_profile(app, user_id):
url = f'/users/{user_id}'
with http_client(app) as client:
return client.get(url)
| homeworkprod/byceps | tests/integration/blueprints/site/user_profile/test_view.py | Python | bsd-3-clause | 1,245 |
import sys, copy, types as pytypes
IS_RPYTHON = sys.argv[0].endswith('rpython')
if IS_RPYTHON:
from rpython.rlib.listsort import TimSort
else:
import re
# General functions
class StringSort(TimSort):
def lt(self, a, b):
assert isinstance(a, unicode)
assert isinstance(b, unicode)
return a < b
def _equal_Q(a, b):
assert isinstance(a, MalType) and isinstance(b, MalType)
ota, otb = a.__class__, b.__class__
if not (ota is otb or (_sequential_Q(a) and _sequential_Q(b))):
return False
if isinstance(a, MalSym) and isinstance(b, MalSym):
return a.value == b.value
elif isinstance(a, MalStr) and isinstance(b, MalStr):
return a.value == b.value
elif isinstance(a, MalInt) and isinstance(b, MalInt):
return a.value == b.value
elif _list_Q(a) or _vector_Q(a):
if len(a) != len(b): return False
for i in range(len(a)):
if not _equal_Q(a[i], b[i]): return False
return True
elif _hash_map_Q(a):
assert isinstance(a, MalHashMap)
assert isinstance(b, MalHashMap)
akeys = a.dct.keys()
bkeys = b.dct.keys()
if len(akeys) != len(bkeys): return False
StringSort(akeys).sort()
StringSort(bkeys).sort()
for i in range(len(akeys)):
ak, bk = akeys[i], bkeys[i]
assert isinstance(ak, unicode)
assert isinstance(bk, unicode)
if ak != bk: return False
av, bv = a.dct[ak], b.dct[bk]
if not _equal_Q(av, bv): return False
return True
elif a is b:
return True
else:
throw_str("no = op defined for %s" % a.__class__.__name__)
def _sequential_Q(seq): return _list_Q(seq) or _vector_Q(seq)
def _clone(obj):
if isinstance(obj, MalFunc):
return MalFunc(obj.fn, obj.ast, obj.env, obj.params,
obj.EvalFunc, obj.ismacro)
elif isinstance(obj, MalList):
return obj.__class__(obj.values)
elif isinstance(obj, MalHashMap):
return MalHashMap(obj.dct)
elif isinstance(obj, MalAtom):
return MalAtom(obj.value)
else:
raise Exception("_clone on invalid type")
def _replace(match, sub, old_str):
new_str = u""
idx = 0
while idx < len(old_str):
midx = old_str.find(match, idx)
if midx < 0: break
assert midx >= 0 and midx < len(old_str)
new_str = new_str + old_str[idx:midx]
new_str = new_str + sub
idx = midx + len(match)
new_str = new_str + old_str[idx:]
return new_str
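def _replace_demo():
    # Hedged sketch (added): _replace is an RPython-friendly stand-in for
    # unicode.replace; every occurrence of match is swapped for sub.
    assert _replace(u"b", u"XY", u"abcabc") == u"aXYcaXYc"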
#
# Mal Types
#
class MalException(Exception):
def __init__(self, object):
self.object = object
def throw_str(s):
raise MalException(MalStr(unicode(s)))
### Parent types
class MalType(): pass
class MalMeta(MalType): pass
### Scalars
class MalNil(MalType): pass
nil = MalNil()
def _nil_Q(exp):
assert isinstance(exp, MalType)
return exp is nil
class MalTrue(MalType): pass
true = MalTrue()
def _true_Q(exp):
assert isinstance(exp, MalType)
return exp is true
class MalFalse(MalType): pass
false = MalFalse()
def _false_Q(exp):
assert isinstance(exp, MalType)
return exp is false
# Numbers
class MalInt(MalType):
def __init__(self, value):
assert isinstance(value, int)
self.value = value
def _int_Q(exp):
assert isinstance(exp, MalType)
return exp.__class__ is MalInt
# String
class MalStr(MalType):
def __init__(self, value):
assert isinstance(value, unicode)
self.value = value
def __len__(self):
return len(self.value)
def _string_Q(exp):
assert isinstance(exp, MalType)
return exp.__class__ is MalStr and not _keyword_Q(exp)
# Keywords
# A specially prefixed string
def _keyword(mstr):
assert isinstance(mstr, MalType)
if isinstance(mstr, MalStr):
val = mstr.value
if val[0] == u"\u029e": return mstr
else: return MalStr(u"\u029e" + val)
else:
throw_str("_keyword called on non-string")
# Create keyword from unicode string
def _keywordu(strn):
assert isinstance(strn, unicode)
return MalStr(u"\u029e" + strn)
def _keyword_Q(exp):
if isinstance(exp, MalStr) and len(exp.value) > 0:
return exp.value[0] == u"\u029e"
else:
return False
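def _keyword_demo():
    # Hedged sketch (added): keywords are MalStr values carrying a one-char
    # prefix, so they answer True to _keyword_Q but False to _string_Q.
    kw = _keywordu(u"kw")
    assert _keyword_Q(kw) and not _string_Q(kw)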
# Symbols
class MalSym(MalMeta):
def __init__(self, value):
assert isinstance(value, unicode)
self.value = value
self.meta = nil
def _symbol(strn):
assert isinstance(strn, unicode)
return MalSym(strn)
def _symbol_Q(exp):
assert isinstance(exp, MalType)
return exp.__class__ is MalSym
# lists
class MalList(MalMeta):
def __init__(self, vals):
assert isinstance(vals, list)
self.values = vals
self.meta = nil
def append(self, val):
self.values.append(val)
def rest(self):
return MalList(self.values[1:])
def __len__(self):
return len(self.values)
def __getitem__(self, i):
assert isinstance(i, int)
return self.values[i]
def slice(self, start):
return MalList(self.values[start:len(self.values)])
def slice2(self, start, end):
assert end >= 0
return MalList(self.values[start:end])
def _list(*vals): return MalList(list(vals))
def _listl(lst): return MalList(lst)
def _list_Q(exp):
assert isinstance(exp, MalType)
return exp.__class__ is MalList
### vectors
class MalVector(MalList):
pass
def _vector(*vals): return MalVector(list(vals))
def _vectorl(lst): return MalVector(lst)
def _vector_Q(exp):
assert isinstance(exp, MalType)
return exp.__class__ is MalVector
### hash maps
class MalHashMap(MalMeta):
def __init__(self, dct):
self.dct = dct
self.meta = nil
def append(self, val):
self.dct.append(val)
def __getitem__(self, k):
assert isinstance(k, unicode)
if not isinstance(k, unicode):
throw_str("hash-map lookup by non-string/non-keyword")
return self.dct[k]
def __setitem__(self, k, v):
if not isinstance(k, unicode):
throw_str("hash-map key must be string or keyword")
assert isinstance(v, MalType)
self.dct[k] = v
return v
def _hash_mapl(kvs):
dct = {}
for i in range(0, len(kvs), 2):
k = kvs[i]
if not isinstance(k, MalStr):
throw_str("hash-map key must be string or keyword")
v = kvs[i+1]
dct[k.value] = v
return MalHashMap(dct)
def _hash_map_Q(exp):
assert isinstance(exp, MalType)
return exp.__class__ is MalHashMap
# Functions
# env import must happen after the MalSym and MalList definitions to
# accommodate the circular dependency between the two modules
from env import Env
class MalFunc(MalMeta):
def __init__(self, fn, ast=None, env=None, params=None,
EvalFunc=None, ismacro=False):
if fn is None and EvalFunc is None:
throw_str("MalFunc requires either fn or EvalFunc")
self.fn = fn
self.ast = ast
self.env = env
self.params = params
self.EvalFunc = EvalFunc
self.ismacro = ismacro
self.meta = nil
def apply(self, args):
if self.EvalFunc:
return self.EvalFunc(self.ast, self.gen_env(args))
else:
return self.fn(args)
def gen_env(self, args):
return Env(self.env, self.params, args)
def _function_Q(exp):
assert isinstance(exp, MalType)
return exp.__class__ is MalFunc
# atoms
class MalAtom(MalMeta):
def __init__(self, value):
self.value = value
self.meta = nil
def get_value(self):
return self.value
def _atom(val): return MalAtom(val)
def _atom_Q(exp): return exp.__class__ is MalAtom
| hterkelsen/mal | rpython/mal_types.py | Python | mpl-2.0 | 7,771 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenthread
import mock
from nova.compute import power_state
from nova.compute import task_states
from nova import exception
from nova.pci import pci_manager
from nova import test
from nova.tests import fake_instance
from nova.tests.virt.xenapi import stubs
from nova.virt import fake
from nova.virt.xenapi import agent as xenapi_agent
from nova.virt.xenapi.client import session as xenapi_session
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
class VMOpsTestBase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(VMOpsTestBase, self).setUp()
self._setup_mock_vmops()
self.vms = []
def _setup_mock_vmops(self, product_brand=None, product_version=None):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
self._session = xenapi_session.XenAPISession('test_url', 'root',
'test_pass')
self.vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())
def create_vm(self, name, state="Running"):
vm_ref = xenapi_fake.create_vm(name, state)
self.vms.append(vm_ref)
vm = xenapi_fake.get_record("VM", vm_ref)
return vm, vm_ref
def tearDown(self):
super(VMOpsTestBase, self).tearDown()
for vm in self.vms:
xenapi_fake.destroy_vm(vm)
class VMOpsTestCase(VMOpsTestBase):
def setUp(self):
super(VMOpsTestCase, self).setUp()
self._setup_mock_vmops()
def _setup_mock_vmops(self, product_brand=None, product_version=None):
self._session = self._get_mock_session(product_brand, product_version)
self._vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())
def _get_mock_session(self, product_brand, product_version):
class Mock(object):
pass
mock_session = Mock()
mock_session.product_brand = product_brand
mock_session.product_version = product_version
return mock_session
def _test_finish_revert_migration_after_crash(self, backup_made, new_made,
vm_shutdown=True):
instance = {'name': 'foo',
'task_state': task_states.RESIZE_MIGRATING}
context = 'fake_context'
self.mox.StubOutWithMock(vm_utils, 'lookup')
self.mox.StubOutWithMock(self._vmops, '_destroy')
self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label')
self.mox.StubOutWithMock(self._vmops, '_attach_mapped_block_devices')
self.mox.StubOutWithMock(self._vmops, '_start')
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
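        # 'cond and a or b' is the pre-ternary idiom for 'a if cond else b'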
vm_utils.lookup(self._session, 'foo-orig').AndReturn(
backup_made and 'foo' or None)
vm_utils.lookup(self._session, 'foo').AndReturn(
(not backup_made or new_made) and 'foo' or None)
if backup_made:
if new_made:
self._vmops._destroy(instance, 'foo')
vm_utils.set_vm_name_label(self._session, 'foo', 'foo')
self._vmops._attach_mapped_block_devices(instance, [])
vm_utils.is_vm_shutdown(self._session, 'foo').AndReturn(vm_shutdown)
if vm_shutdown:
self._vmops._start(instance, 'foo')
self.mox.ReplayAll()
self._vmops.finish_revert_migration(context, instance, [])
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(True, True)
def test_finish_revert_migration_after_crash_before_new(self):
self._test_finish_revert_migration_after_crash(True, False)
def test_finish_revert_migration_after_crash_before_backup(self):
self._test_finish_revert_migration_after_crash(False, False)
def test_xsm_sr_check_relaxed_cached(self):
self.make_plugin_call_count = 0
def fake_make_plugin_call(plugin, method, **args):
self.make_plugin_call_count = self.make_plugin_call_count + 1
return "true"
self.stubs.Set(self._vmops, "_make_plugin_call",
fake_make_plugin_call)
self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
self.assertEqual(self.make_plugin_call_count, 1)
def test_get_vm_opaque_ref_raises_instance_not_found(self):
instance = {"name": "dummy"}
self.mox.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(self._session, instance['name'], False).AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(exception.InstanceNotFound,
self._vmops._get_vm_opaque_ref, instance)
class InjectAutoDiskConfigTestCase(VMOpsTestBase):
def setUp(self):
super(InjectAutoDiskConfigTestCase, self).setUp()
def test_inject_auto_disk_config_when_present(self):
vm, vm_ref = self.create_vm("dummy")
instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": True}
self.vmops._inject_auto_disk_config(instance, vm_ref)
xenstore_data = vm['xenstore_data']
self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'True')
def test_inject_auto_disk_config_none_as_false(self):
vm, vm_ref = self.create_vm("dummy")
instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
self.vmops._inject_auto_disk_config(instance, vm_ref)
xenstore_data = vm['xenstore_data']
self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'False')
class GetConsoleOutputTestCase(VMOpsTestBase):
def setUp(self):
super(GetConsoleOutputTestCase, self).setUp()
def test_get_console_output_works(self):
self.mox.StubOutWithMock(self.vmops, '_get_dom_id')
instance = {"name": "dummy"}
self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(42)
self.mox.ReplayAll()
self.assertEqual("dom_id: 42", self.vmops.get_console_output(instance))
def test_get_console_output_throws_nova_exception(self):
self.mox.StubOutWithMock(self.vmops, '_get_dom_id')
instance = {"name": "dummy"}
# dom_id=0 used to trigger exception in fake XenAPI
self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(0)
self.mox.ReplayAll()
self.assertRaises(exception.NovaException,
self.vmops.get_console_output, instance)
def test_get_dom_id_works(self):
instance = {"name": "dummy"}
vm, vm_ref = self.create_vm("dummy")
self.assertEqual(vm["domid"], self.vmops._get_dom_id(instance))
def test_get_dom_id_works_with_rescue_vm(self):
instance = {"name": "dummy"}
vm, vm_ref = self.create_vm("dummy-rescue")
self.assertEqual(vm["domid"],
self.vmops._get_dom_id(instance, check_rescue=True))
def test_get_dom_id_raises_not_found(self):
instance = {"name": "dummy"}
self.create_vm("not-dummy")
self.assertRaises(exception.NotFound, self.vmops._get_dom_id, instance)
def test_get_dom_id_works_with_vmref(self):
vm, vm_ref = self.create_vm("dummy")
self.assertEqual(vm["domid"],
self.vmops._get_dom_id(vm_ref=vm_ref))
class SpawnTestCase(VMOpsTestBase):
def _stub_out_common(self):
self.mox.StubOutWithMock(self.vmops, '_ensure_instance_name_unique')
self.mox.StubOutWithMock(self.vmops, '_ensure_enough_free_mem')
self.mox.StubOutWithMock(self.vmops, '_update_instance_progress')
self.mox.StubOutWithMock(vm_utils, 'determine_disk_image_type')
self.mox.StubOutWithMock(vm_utils, 'get_vdis_for_instance')
self.mox.StubOutWithMock(vm_utils, 'safe_destroy_vdis')
self.mox.StubOutWithMock(self.vmops, '_resize_up_vdis')
self.mox.StubOutWithMock(vm_utils,
'create_kernel_and_ramdisk')
self.mox.StubOutWithMock(vm_utils, 'destroy_kernel_ramdisk')
self.mox.StubOutWithMock(self.vmops, '_create_vm_record')
self.mox.StubOutWithMock(self.vmops, '_destroy')
self.mox.StubOutWithMock(self.vmops, '_attach_disks')
self.mox.StubOutWithMock(pci_manager, 'get_instance_pci_devs')
self.mox.StubOutWithMock(vm_utils, 'set_other_config_pci')
self.mox.StubOutWithMock(self.vmops, '_attach_orig_disk_for_rescue')
self.mox.StubOutWithMock(self.vmops, 'inject_network_info')
self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
self.mox.StubOutWithMock(self.vmops, '_inject_instance_metadata')
self.mox.StubOutWithMock(self.vmops, '_inject_auto_disk_config')
self.mox.StubOutWithMock(self.vmops, '_file_inject_vm_settings')
self.mox.StubOutWithMock(self.vmops, '_create_vifs')
self.mox.StubOutWithMock(self.vmops.firewall_driver,
'setup_basic_filtering')
self.mox.StubOutWithMock(self.vmops.firewall_driver,
'prepare_instance_filter')
self.mox.StubOutWithMock(self.vmops, '_start')
self.mox.StubOutWithMock(self.vmops, '_wait_for_instance_to_start')
self.mox.StubOutWithMock(self.vmops,
'_configure_new_instance_with_agent')
self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
self.mox.StubOutWithMock(self.vmops.firewall_driver,
'apply_instance_filter')
def _test_spawn(self, name_label_param=None, block_device_info_param=None,
rescue=False, include_root_vdi=True, throw_exception=None,
attach_pci_dev=False):
self._stub_out_common()
instance = {"name": "dummy", "uuid": "fake_uuid"}
name_label = name_label_param
if name_label is None:
name_label = "dummy"
image_meta = {"id": "image_id"}
context = "context"
session = self.vmops._session
injected_files = "fake_files"
admin_password = "password"
network_info = "net_info"
steps = 10
if rescue:
steps += 1
block_device_info = block_device_info_param
if block_device_info and not block_device_info['root_device_name']:
block_device_info = dict(block_device_info_param)
block_device_info['root_device_name'] = \
self.vmops.default_root_dev
di_type = "di_type"
vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
step = 1
self.vmops._update_instance_progress(context, instance, step, steps)
vdis = {"other": {"ref": "fake_ref_2", "osvol": True}}
if include_root_vdi:
vdis["root"] = {"ref": "fake_ref"}
vm_utils.get_vdis_for_instance(context, session, instance, name_label,
"image_id", di_type,
block_device_info=block_device_info).AndReturn(vdis)
self.vmops._resize_up_vdis(instance, vdis)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
kernel_file = "kernel"
ramdisk_file = "ramdisk"
vm_utils.create_kernel_and_ramdisk(context, session,
instance, name_label).AndReturn((kernel_file, ramdisk_file))
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
vm_ref = "fake_vm_ref"
self.vmops._ensure_instance_name_unique(name_label)
self.vmops._ensure_enough_free_mem(instance)
self.vmops._create_vm_record(context, instance, name_label,
di_type, kernel_file,
ramdisk_file, image_meta).AndReturn(vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
network_info, admin_password, injected_files)
if attach_pci_dev:
fake_dev = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 1,
'compute_node_id': 1,
'address': '00:00.0',
'vendor_id': '1234',
'product_id': 'abcd',
'dev_type': 'type-PCI',
'status': 'available',
'dev_id': 'devid',
'label': 'label',
'instance_uuid': None,
'extra_info': '{}',
}
pci_manager.get_instance_pci_devs(instance).AndReturn([fake_dev])
vm_utils.set_other_config_pci(self.vmops._session,
vm_ref,
"0/0000:00:00.0")
else:
pci_manager.get_instance_pci_devs(instance).AndReturn([])
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._inject_instance_metadata(instance, vm_ref)
self.vmops._inject_auto_disk_config(instance, vm_ref)
self.vmops._inject_hostname(instance, vm_ref, rescue)
self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
network_info)
self.vmops.inject_network_info(instance, network_info, vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._create_vifs(instance, vm_ref, network_info)
self.vmops.firewall_driver.setup_basic_filtering(instance,
network_info).AndRaise(NotImplementedError)
self.vmops.firewall_driver.prepare_instance_filter(instance,
network_info)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
if rescue:
self.vmops._attach_orig_disk_for_rescue(instance, vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step,
steps)
self.vmops._start(instance, vm_ref)
self.vmops._wait_for_instance_to_start(instance, vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._configure_new_instance_with_agent(instance, vm_ref,
injected_files, admin_password)
self.vmops._remove_hostname(instance, vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops.firewall_driver.apply_instance_filter(instance,
network_info)
step += 1
last_call = self.vmops._update_instance_progress(context, instance,
step, steps)
if throw_exception:
last_call.AndRaise(throw_exception)
self.vmops._destroy(instance, vm_ref, network_info=network_info)
vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
kernel_file, ramdisk_file)
vm_utils.safe_destroy_vdis(self.vmops._session, ["fake_ref"])
self.mox.ReplayAll()
self.vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info,
block_device_info_param, name_label_param, rescue)
def test_spawn(self):
self._test_spawn()
def test_spawn_with_alternate_options(self):
self._test_spawn(include_root_vdi=False, rescue=True,
name_label_param="bob",
block_device_info_param={"root_device_name": ""})
def test_spawn_with_pci_available_on_the_host(self):
self._test_spawn(attach_pci_dev=True)
def test_spawn_performs_rollback_and_throws_exception(self):
self.assertRaises(test.TestingException, self._test_spawn,
throw_exception=test.TestingException())
def _test_finish_migration(self, power_on=True, resize_instance=True,
throw_exception=None):
self._stub_out_common()
self.mox.StubOutWithMock(vm_utils, "import_all_migrated_disks")
self.mox.StubOutWithMock(self.vmops, "_attach_mapped_block_devices")
context = "context"
migration = {}
name_label = "dummy"
instance = {"name": name_label, "uuid": "fake_uuid"}
disk_info = "disk_info"
network_info = "net_info"
image_meta = {"id": "image_id"}
block_device_info = "bdi"
session = self.vmops._session
self.vmops._ensure_instance_name_unique(name_label)
self.vmops._ensure_enough_free_mem(instance)
di_type = "di_type"
vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
root_vdi = {"ref": "fake_ref"}
ephemeral_vdi = {"ref": "fake_ref_e"}
vdis = {"root": root_vdi, "ephemerals": {4: ephemeral_vdi}}
vm_utils.import_all_migrated_disks(self.vmops._session,
instance).AndReturn(vdis)
kernel_file = "kernel"
ramdisk_file = "ramdisk"
vm_utils.create_kernel_and_ramdisk(context, session,
instance, name_label).AndReturn((kernel_file, ramdisk_file))
vm_ref = "fake_vm_ref"
self.vmops._create_vm_record(context, instance, name_label,
di_type, kernel_file,
ramdisk_file, image_meta).AndReturn(vm_ref)
if resize_instance:
self.vmops._resize_up_vdis(instance, vdis)
self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
network_info, None, None)
self.vmops._attach_mapped_block_devices(instance, block_device_info)
pci_manager.get_instance_pci_devs(instance).AndReturn([])
self.vmops._inject_instance_metadata(instance, vm_ref)
self.vmops._inject_auto_disk_config(instance, vm_ref)
self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
network_info)
self.vmops.inject_network_info(instance, network_info, vm_ref)
self.vmops._create_vifs(instance, vm_ref, network_info)
self.vmops.firewall_driver.setup_basic_filtering(instance,
network_info).AndRaise(NotImplementedError)
self.vmops.firewall_driver.prepare_instance_filter(instance,
network_info)
if power_on:
self.vmops._start(instance, vm_ref)
self.vmops._wait_for_instance_to_start(instance, vm_ref)
self.vmops.firewall_driver.apply_instance_filter(instance,
network_info)
last_call = self.vmops._update_instance_progress(context, instance,
step=5, total_steps=5)
if throw_exception:
last_call.AndRaise(throw_exception)
self.vmops._destroy(instance, vm_ref, network_info=network_info)
vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
kernel_file, ramdisk_file)
vm_utils.safe_destroy_vdis(self.vmops._session,
["fake_ref_e", "fake_ref"])
self.mox.ReplayAll()
self.vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info, power_on)
def test_finish_migration(self):
self._test_finish_migration()
def test_finish_migration_no_power_on(self):
self._test_finish_migration(power_on=False, resize_instance=False)
def test_finish_migrate_performs_rollback_on_error(self):
self.assertRaises(test.TestingException, self._test_finish_migration,
power_on=False, resize_instance=False,
throw_exception=test.TestingException())
def test_remove_hostname(self):
vm, vm_ref = self.create_vm("dummy")
instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
self.mox.StubOutWithMock(self._session, 'call_xenapi')
self._session.call_xenapi("VM.remove_from_xenstore_data", vm_ref,
"vm-data/hostname")
self.mox.ReplayAll()
self.vmops._remove_hostname(instance, vm_ref)
self.mox.VerifyAll()
def test_reset_network(self):
class mock_agent(object):
def __init__(self):
self.called = False
def resetnetwork(self):
self.called = True
vm, vm_ref = self.create_vm("dummy")
instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
agent = mock_agent()
self.mox.StubOutWithMock(self.vmops, 'agent_enabled')
self.mox.StubOutWithMock(self.vmops, '_get_agent')
self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
self.vmops.agent_enabled(instance).AndReturn(True)
self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
self.vmops._inject_hostname(instance, vm_ref, False)
self.vmops._remove_hostname(instance, vm_ref)
self.mox.ReplayAll()
self.vmops.reset_network(instance)
self.assertTrue(agent.called)
self.mox.VerifyAll()
def test_inject_hostname(self):
instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname', 'dummy')
self.mox.ReplayAll()
self.vmops._inject_hostname(instance, vm_ref, rescue=False)
def test_inject_hostname_with_rescue_prefix(self):
instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
'RESCUE-dummy')
self.mox.ReplayAll()
self.vmops._inject_hostname(instance, vm_ref, rescue=True)
def test_inject_hostname_with_windows_name_truncation(self):
instance = {"hostname": "dummydummydummydummydummy",
"os_type": "windows", "uuid": "uuid"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
'RESCUE-dummydum')
self.mox.ReplayAll()
self.vmops._inject_hostname(instance, vm_ref, rescue=True)
def test_wait_for_instance_to_start(self):
instance = {"uuid": "uuid"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(vm_utils, 'get_power_state')
self.mox.StubOutWithMock(greenthread, 'sleep')
vm_utils.get_power_state(self._session, vm_ref).AndReturn(
power_state.SHUTDOWN)
greenthread.sleep(0.5)
vm_utils.get_power_state(self._session, vm_ref).AndReturn(
power_state.RUNNING)
self.mox.ReplayAll()
self.vmops._wait_for_instance_to_start(instance, vm_ref)
def test_attach_orig_disk_for_rescue(self):
instance = {"name": "dummy"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(vm_utils, 'lookup')
self.mox.StubOutWithMock(self.vmops, '_find_root_vdi_ref')
self.mox.StubOutWithMock(vm_utils, 'create_vbd')
vm_utils.lookup(self.vmops._session, "dummy").AndReturn("ref")
self.vmops._find_root_vdi_ref("ref").AndReturn("vdi_ref")
vm_utils.create_vbd(self.vmops._session, vm_ref, "vdi_ref",
vmops.DEVICE_RESCUE, bootable=False)
self.mox.ReplayAll()
self.vmops._attach_orig_disk_for_rescue(instance, vm_ref)
def test_agent_update_setup(self):
# agent updates need to occur after networking is configured
instance = {'name': 'betelgeuse',
'uuid': '1-2-3-4-5-6'}
vm_ref = 'vm_ref'
agent = xenapi_agent.XenAPIBasedAgent(self.vmops._session,
self.vmops._virtapi, instance, vm_ref)
self.mox.StubOutWithMock(xenapi_agent, 'should_use_agent')
self.mox.StubOutWithMock(self.vmops, '_get_agent')
self.mox.StubOutWithMock(agent, 'get_version')
self.mox.StubOutWithMock(agent, 'resetnetwork')
self.mox.StubOutWithMock(agent, 'update_if_needed')
xenapi_agent.should_use_agent(instance).AndReturn(True)
self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
agent.get_version().AndReturn('1.2.3')
agent.resetnetwork()
agent.update_if_needed('1.2.3')
self.mox.ReplayAll()
self.vmops._configure_new_instance_with_agent(instance, vm_ref,
None, None)
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
@mock.patch.object(vm_utils, 'get_sr_path')
@mock.patch.object(vmops.VMOps, '_detach_block_devices_from_orig_vm')
@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_down')
@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_up')
class MigrateDiskAndPowerOffTestCase(VMOpsTestBase):
def test_migrate_disk_and_power_off_works_down(self,
migrate_up, migrate_down, *mocks):
instance = {"root_gb": 2, "ephemeral_gb": 0, "uuid": "uuid"}
flavor = {"root_gb": 1, "ephemeral_gb": 0}
self.vmops.migrate_disk_and_power_off(None, instance, None,
flavor, None)
self.assertFalse(migrate_up.called)
self.assertTrue(migrate_down.called)
def test_migrate_disk_and_power_off_works_up(self,
migrate_up, migrate_down, *mocks):
instance = {"root_gb": 1, "ephemeral_gb": 1, "uuid": "uuid"}
flavor = {"root_gb": 2, "ephemeral_gb": 2}
self.vmops.migrate_disk_and_power_off(None, instance, None,
flavor, None)
self.assertFalse(migrate_down.called)
self.assertTrue(migrate_up.called)
def test_migrate_disk_and_power_off_resize_down_ephemeral_fails(self,
migrate_up, migrate_down, *mocks):
instance = {"ephemeral_gb": 2}
flavor = {"ephemeral_gb": 1}
self.assertRaises(exception.ResizeError,
self.vmops.migrate_disk_and_power_off,
None, instance, None, flavor, None)
@mock.patch.object(vm_utils, 'migrate_vhd')
@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
@mock.patch.object(vm_utils, 'get_all_vdi_uuids_for_vm')
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
class MigrateDiskResizingUpTestCase(VMOpsTestBase):
def _fake_snapshot_attached_here(self, session, instance, vm_ref, label,
userdevice, post_snapshot_callback):
self.assertIsInstance(instance, dict)
if userdevice == '0':
self.assertEqual("vm_ref", vm_ref)
self.assertEqual("fake-snapshot", label)
yield ["leaf", "parent", "grandp"]
else:
leaf = userdevice + "-leaf"
parent = userdevice + "-parent"
yield [leaf, parent]
def test_migrate_disk_resizing_up_works_no_ephemeral(self,
mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
mock_shutdown, mock_migrate_vhd):
context = "ctxt"
instance = {"name": "fake", "uuid": "uuid"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
mock_get_all_vdi_uuids.return_value = None
with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
self._fake_snapshot_attached_here):
self.vmops._migrate_disk_resizing_up(context, instance, dest,
vm_ref, sr_path)
mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
vm_ref, min_userdevice=4)
mock_apply_orig.assert_called_once_with(instance, vm_ref)
mock_shutdown.assert_called_once_with(instance, vm_ref)
m_vhd_expected = [mock.call(self.vmops._session, instance, "parent",
dest, sr_path, 1),
mock.call(self.vmops._session, instance, "grandp",
dest, sr_path, 2),
mock.call(self.vmops._session, instance, "leaf",
dest, sr_path, 0)]
self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
prog_expected = [
mock.call(context, instance, 1, 5),
mock.call(context, instance, 2, 5),
mock.call(context, instance, 3, 5),
mock.call(context, instance, 4, 5)
# 5/5: step to be executed by finish migration.
]
self.assertEqual(prog_expected, mock_update_progress.call_args_list)
def test_migrate_disk_resizing_up_works_with_two_ephemeral(self,
mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
mock_shutdown, mock_migrate_vhd):
context = "ctxt"
instance = {"name": "fake", "uuid": "uuid"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]
with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
self._fake_snapshot_attached_here):
self.vmops._migrate_disk_resizing_up(context, instance, dest,
vm_ref, sr_path)
mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
vm_ref, min_userdevice=4)
mock_apply_orig.assert_called_once_with(instance, vm_ref)
mock_shutdown.assert_called_once_with(instance, vm_ref)
m_vhd_expected = [mock.call(self.vmops._session, instance,
"parent", dest, sr_path, 1),
mock.call(self.vmops._session, instance,
"grandp", dest, sr_path, 2),
mock.call(self.vmops._session, instance,
"4-parent", dest, sr_path, 1, 1),
mock.call(self.vmops._session, instance,
"5-parent", dest, sr_path, 1, 2),
mock.call(self.vmops._session, instance,
"leaf", dest, sr_path, 0),
mock.call(self.vmops._session, instance,
"4-leaf", dest, sr_path, 0, 1),
mock.call(self.vmops._session, instance,
"5-leaf", dest, sr_path, 0, 2)]
self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
prog_expected = [
mock.call(context, instance, 1, 5),
mock.call(context, instance, 2, 5),
mock.call(context, instance, 3, 5),
mock.call(context, instance, 4, 5)
# 5/5: step to be executed by finish migration.
]
self.assertEqual(prog_expected, mock_update_progress.call_args_list)
@mock.patch.object(vmops.VMOps, '_restore_orig_vm_and_cleanup_orphan')
def test_migrate_disk_resizing_up_rollback(self,
mock_restore,
mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
mock_shutdown, mock_migrate_vhd):
context = "ctxt"
instance = {"name": "fake", "uuid": "fake"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
mock_migrate_vhd.side_effect = test.TestingException
mock_restore.side_effect = test.TestingException
with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
self._fake_snapshot_attached_here):
self.assertRaises(exception.InstanceFaultRollback,
self.vmops._migrate_disk_resizing_up,
context, instance, dest, vm_ref, sr_path)
mock_apply_orig.assert_called_once_with(instance, vm_ref)
mock_restore.assert_called_once_with(instance)
mock_migrate_vhd.assert_called_once_with(self.vmops._session,
instance, "parent", dest, sr_path, 1)
class CreateVMRecordTestCase(VMOpsTestBase):
@mock.patch.object(vm_utils, 'determine_vm_mode')
@mock.patch.object(vm_utils, 'get_vm_device_id')
@mock.patch.object(vm_utils, 'create_vm')
def test_create_vm_record_with_vm_device_id(self, mock_create_vm,
mock_get_vm_device_id, mock_determine_vm_mode):
context = "context"
instance = {"vm_mode": "vm_mode", "uuid": "uuid123"}
name_label = "dummy"
disk_image_type = "vhd"
kernel_file = "kernel"
ramdisk_file = "ram"
device_id = "0002"
image_properties = {"xenapi_device_id": device_id}
image_meta = {"properties": image_properties}
session = "session"
self.vmops._session = session
mock_get_vm_device_id.return_value = device_id
mock_determine_vm_mode.return_value = "vm_mode"
self.vmops._create_vm_record(context, instance, name_label,
disk_image_type, kernel_file, ramdisk_file, image_meta)
mock_get_vm_device_id.assert_called_with(session, image_properties)
mock_create_vm.assert_called_with(session, instance, name_label,
kernel_file, ramdisk_file, False, device_id)
class BootableTestCase(VMOpsTestBase):
def setUp(self):
super(BootableTestCase, self).setUp()
self.instance = {"name": "test", "uuid": "fake"}
vm_rec, self.vm_ref = self.create_vm('test')
# sanity check bootlock is initially disabled:
self.assertEqual({}, vm_rec['blocked_operations'])
def _get_blocked(self):
vm_rec = self._session.call_xenapi("VM.get_record", self.vm_ref)
return vm_rec['blocked_operations']
def test_acquire_bootlock(self):
self.vmops._acquire_bootlock(self.vm_ref)
blocked = self._get_blocked()
self.assertIn('start', blocked)
def test_release_bootlock(self):
self.vmops._acquire_bootlock(self.vm_ref)
self.vmops._release_bootlock(self.vm_ref)
blocked = self._get_blocked()
self.assertNotIn('start', blocked)
def test_set_bootable(self):
self.vmops.set_bootable(self.instance, True)
blocked = self._get_blocked()
self.assertNotIn('start', blocked)
def test_set_not_bootable(self):
self.vmops.set_bootable(self.instance, False)
blocked = self._get_blocked()
self.assertIn('start', blocked)
@mock.patch.object(vm_utils, 'update_vdi_virtual_size', autospec=True)
class ResizeVdisTestCase(VMOpsTestBase):
def test_dont_resize_root_volumes_osvol_false(self, mock_resize):
instance = fake_instance.fake_db_instance(root_gb=20)
vdis = {'root': {'osvol': False, 'ref': 'vdi_ref'}}
self.vmops._resize_up_vdis(instance, vdis)
self.assertTrue(mock_resize.called)
def test_dont_resize_root_volumes_osvol_true(self, mock_resize):
instance = fake_instance.fake_db_instance(root_gb=20)
vdis = {'root': {'osvol': True}}
self.vmops._resize_up_vdis(instance, vdis)
self.assertFalse(mock_resize.called)
def test_dont_resize_root_volumes_no_osvol(self, mock_resize):
instance = fake_instance.fake_db_instance(root_gb=20)
vdis = {'root': {}}
self.vmops._resize_up_vdis(instance, vdis)
self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
def test_ensure_ephemeral_resize_with_root_volume(self, mock_sizes,
mock_resize):
mock_sizes.return_value = [2000, 1000]
instance = fake_instance.fake_db_instance(root_gb=20, ephemeral_gb=20)
ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
vdis = {'root': {'osvol': True, 'ref': 'vdi_ref'},
'ephemerals': ephemerals}
with mock.patch.object(vm_utils, 'generate_single_ephemeral',
autospec=True) as g:
self.vmops._resize_up_vdis(instance, vdis)
self.assertEqual([mock.call(self.vmops._session, instance, 4,
2000),
mock.call(self.vmops._session, instance, 5,
1000)],
mock_resize.call_args_list)
self.assertFalse(g.called)
def test_resize_up_vdis_root(self, mock_resize):
instance = {"root_gb": 20, "ephemeral_gb": 0}
self.vmops._resize_up_vdis(instance, {"root": {"ref": "vdi_ref"}})
mock_resize.assert_called_once_with(self.vmops._session, instance,
"vdi_ref", 20)
def test_resize_up_vdis_zero_disks(self, mock_resize):
instance = {"root_gb": 0, "ephemeral_gb": 0}
self.vmops._resize_up_vdis(instance, {"root": {}})
self.assertFalse(mock_resize.called)
def test_resize_up_vdis_no_vdis_like_initial_spawn(self, mock_resize):
instance = {"root_gb": 0, "ephemeral_gb": 3000}
vdis = {}
self.vmops._resize_up_vdis(instance, vdis)
self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
def test_resize_up_vdis_ephemeral(self, mock_sizes, mock_resize):
mock_sizes.return_value = [2000, 1000]
instance = {"root_gb": 0, "ephemeral_gb": 3000}
ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
vdis = {"ephemerals": ephemerals}
self.vmops._resize_up_vdis(instance, vdis)
mock_sizes.assert_called_once_with(3000)
expected = [mock.call(self.vmops._session, instance, 4, 2000),
mock.call(self.vmops._session, instance, 5, 1000)]
self.assertEqual(expected, mock_resize.call_args_list)
@mock.patch.object(vm_utils, 'generate_single_ephemeral')
@mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
def test_resize_up_vdis_ephemeral_with_generate(self, mock_sizes,
mock_generate,
mock_resize):
mock_sizes.return_value = [2000, 1000]
instance = {"root_gb": 0, "ephemeral_gb": 3000, "uuid": "a"}
ephemerals = {"4": {"ref": 4}}
vdis = {"ephemerals": ephemerals}
self.vmops._resize_up_vdis(instance, vdis)
mock_sizes.assert_called_once_with(3000)
mock_resize.assert_called_once_with(self.vmops._session, instance,
4, 2000)
mock_generate.assert_called_once_with(self.vmops._session, instance,
None, 5, 1000)
@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, 'resize_disk')
@mock.patch.object(vm_utils, 'migrate_vhd')
@mock.patch.object(vm_utils, 'destroy_vdi')
class MigrateDiskResizingDownTestCase(VMOpsTestBase):
def test_migrate_disk_resizing_down_works_no_ephemeral(
self,
mock_destroy_vdi,
mock_migrate_vhd,
mock_resize_disk,
mock_get_vdi_for_vm_safely,
mock_update_instance_progress,
mock_apply_orig_vm_name_label,
mock_resize_ensure_vm_is_shutdown):
context = "ctx"
instance = {"name": "fake", "uuid": "uuid"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
instance_type = dict(root_gb=1)
old_vdi_ref = "old_ref"
new_vdi_ref = "new_ref"
new_vdi_uuid = "new_uuid"
mock_get_vdi_for_vm_safely.return_value = (old_vdi_ref, None)
mock_resize_disk.return_value = (new_vdi_ref, new_vdi_uuid)
self.vmops._migrate_disk_resizing_down(context, instance, dest,
instance_type, vm_ref, sr_path)
mock_get_vdi_for_vm_safely.assert_called_once_with(
self.vmops._session,
vm_ref)
mock_resize_ensure_vm_is_shutdown.assert_called_once_with(
instance, vm_ref)
mock_apply_orig_vm_name_label.assert_called_once_with(
instance, vm_ref)
mock_resize_disk.assert_called_once_with(
self.vmops._session,
instance,
old_vdi_ref,
instance_type)
mock_migrate_vhd.assert_called_once_with(
self.vmops._session,
instance,
new_vdi_uuid,
dest,
sr_path, 0)
mock_destroy_vdi.assert_called_once_with(
self.vmops._session,
new_vdi_ref)
prog_expected = [
mock.call(context, instance, 1, 5),
mock.call(context, instance, 2, 5),
mock.call(context, instance, 3, 5),
mock.call(context, instance, 4, 5)
# 5/5: step to be executed by finish migration.
]
self.assertEqual(prog_expected,
mock_update_instance_progress.call_args_list)
| nkrinner/nova | nova/tests/virt/xenapi/test_vmops.py | Python | apache-2.0 | 42,897 |
from ogcserver.cgiserver import Handler
from jon import fcgi
class OGCServerHandler(Handler):
configpath = '/path/to/ogcserver.conf'
fcgi.Server({fcgi.FCGI_RESPONDER: OGCServerHandler}).run()
| pbabik/OGCServer | conf/fcgi_app.py | Python | bsd-3-clause | 198 |
#contains different packages | riccardodg/lodstuff | lremap/it/cnr/ilc/lremap2owl/__init__.py | Python | gpl-3.0 | 28 |
#! /usr/bin/env python3
#Michiel Merx and Inne Lemstra 2017-01-24
# NOTE: not working yet; still under construction
import subprocess
import time
def benchmark(outputPreStep, shortReads):
output = "../test_data/Soap2_alignment_paired.sam"
startTime = time.time()
debug = go(outputPreStep, shortReads, output)
endTime = time.time()
bmAlign = endTime - startTime
return([bmAlign, debug])
def go(indexOutputPath, shortReadspath, alignOutputPath):
comAlign = "soap -a {1} {2} -D {0} -o {3}"\
.format(indexOutputPath, shortReadspath[0],\
shortReadspath[1], alignOutputPath)
debug = subprocess.call(["/bin/bash", "-i", "-c", comAlign])
return(debug)
if __name__ == "__main__":
    indexOutputPath = "./index_files/index_ecoli"
    # go() expects the two mate files of a paired-end run (paths illustrative)
    shortReadPaths = ["./sra_set_1.fasta", "./sra_set_2.fasta"]
    alignOutputPath = "./alignments/ecoli"
    debug = go(indexOutputPath, shortReadPaths, alignOutputPath)
| MWJMerkx/pcfb_project | client/sra_modules/soap2Align.py | Python | gpl-3.0 | 863 |
# -*- coding: utf-8 -*-
"""
test_cookiecutter_invocation
----------------------------
Tests to make sure that cookiecutter can be called from the cli without
using the entry point set up for the package.
"""
import os
import pytest
import subprocess
import sys
from cookiecutter import utils
def test_should_raise_error_without_template_arg(capfd):
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call(['python', '-m', 'cookiecutter.cli'])
_, err = capfd.readouterr()
exp_message = 'Error: Missing argument "template".'
assert exp_message in err
@pytest.fixture
def project_dir(request):
"""Remove the rendered project directory created by the test."""
rendered_dir = 'fake-project-templated'
def remove_generated_project():
if os.path.isdir(rendered_dir):
utils.rmtree(rendered_dir)
request.addfinalizer(remove_generated_project)
return rendered_dir
def test_should_invoke_main(monkeypatch, project_dir):
monkeypatch.setenv('PYTHONPATH', '.')
subprocess.check_call([
sys.executable,
'-m',
'cookiecutter.cli',
'tests/fake-repo-tmpl',
'--no-input'
])
assert os.path.isdir(project_dir)
| atlassian/cookiecutter | tests/test_cookiecutter_invocation.py | Python | bsd-3-clause | 1,239 |
# sbhs-timetable-python
# Copyright (C) 2015 Simon Shields, James Ye
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from urllib.parse import quote_plus as urlencode
COLOURS = {
'purple': {
'fg': '#b388ff',
'bg': '#000000',
'highBg': '#fff9c4',
'highFg': '#8bc34a'
},
'default': {
'fg': '#ffffff',
'bg': '#000000',
'highBg': '#e51c23',
'highFg': '#ffc107'
},
'red': {
'fg': '#e84e40',
'bg': '#000000',
'highBg': '#5af158',
'highFg': '#cddc39'
},
'green': {
'fg': '#8bc34a',
'bg': '#000000',
'highBg': '#bf360c',
'highFg': '#ffeb3b'
}
}
class Colour:
def __init__(self, obj):
self.data = obj
self.bg = self.data['bg']
self.fg = self.data['fg']
self.highBg = self.data['highBg']
self.highFg = self.data['highFg']
def __str__(self):
return '?bg=' + urlencode(self.data['bg']) + '&fg=' + urlencode(self.data['fg']) + '&highBg=' + urlencode(self.data['highBg']) + '&highFg=' + urlencode(self.data['highFg'])
def asdict(self):
return self.data
def get(name, invert):
res = COLOURS['default']
if name in COLOURS:
res = COLOURS[name]
if invert:
# don't accidentally overwrite all the things
copy = {}
for key in res:
copy[key] = res[key]
res = copy
(res['fg'],res['bg']) = (res['bg'],res['fg'])
(res['highFg'],res['highBg']) = (res['highBg'],res['highFg'])
return Colour(res)
def get_from_qs(query):
defs = COLOURS['default']
res = {}
for i in defs:
if i not in query:
res[i] = defs[i]
else:
if query[i][0] != '#':
res[i] = '#' + query[i]
else:
res[i] = query[i]
return Colour(res)
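def _palette_demo():
    # Hedged sketch (added): invert swaps the fg/bg pairs, and get_from_qs
    # takes a plain dict of colour values, prepending any missing '#'.
    c = get('red', True)
    assert c.bg == COLOURS['red']['fg']
    c2 = get_from_qs({'fg': 'ffffff'})
    assert c2.fg == '#ffffff' and c2.bg == COLOURS['default']['bg']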
| sbhs-forkbombers/sbhs-timetable-python | sbhstimetable/colours.py | Python | agpl-3.0 | 2,506 |
import flask
from git_code_debt.server.servlets.index import Metric
from testing.assertions.response import assert_no_response_errors
def _test_it_loads(server):
response = server.client.get(flask.url_for('index.show'))
assert_no_response_errors(response)
# Should have a nonzero number of links to things
assert response.pq.find('a[href]')
return response
def test_it_loads_no_data(server):
_test_it_loads(server)
def test_it_loads_with_data(server_with_data):
resp = _test_it_loads(server_with_data.server)
assert 'CurseWords' not in resp.text
def test_metric_classname_overriden():
metric = Metric('metric', True, 0, (), '')
assert metric.classname == 'color-override'
def test_metric_classname_normal():
metric = Metric('metric', False, 0, (), '')
assert metric.classname == ''
| Yelp/git-code-debt | tests/server/servlets/index_test.py | Python | mit | 841 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2017 Ben Kurtovic <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module contains various "context" definitions, which are essentially flags
set during the tokenization process, either on the current parse stack (local
contexts) or affecting all stacks (global contexts). They represent the context
the tokenizer is in, such as inside a template's name definition, or inside a
level-two heading. This is used to determine what tokens are valid at the
current point and also if the current parsing route is invalid.
The tokenizer stores context as an integer, with these definitions bitwise OR'd
to set them, AND'd to check if they're set, and XOR'd to unset them. The
advantage of this is that contexts can have sub-contexts (as ``FOO == 0b11``
will cover ``BAR == 0b10`` and ``BAZ == 0b01``).
Local (stack-specific) contexts:
* :const:`TEMPLATE`
* :const:`TEMPLATE_NAME`
* :const:`TEMPLATE_PARAM_KEY`
* :const:`TEMPLATE_PARAM_VALUE`
* :const:`ARGUMENT`
* :const:`ARGUMENT_NAME`
* :const:`ARGUMENT_DEFAULT`
* :const:`WIKILINK`
* :const:`WIKILINK_TITLE`
* :const:`WIKILINK_TEXT`
* :const:`EXT_LINK`
* :const:`EXT_LINK_URI`
* :const:`EXT_LINK_TITLE`
* :const:`HEADING`
* :const:`HEADING_LEVEL_1`
* :const:`HEADING_LEVEL_2`
* :const:`HEADING_LEVEL_3`
* :const:`HEADING_LEVEL_4`
* :const:`HEADING_LEVEL_5`
* :const:`HEADING_LEVEL_6`
* :const:`TAG`
* :const:`TAG_OPEN`
* :const:`TAG_ATTR`
* :const:`TAG_BODY`
* :const:`TAG_CLOSE`
* :const:`STYLE`
* :const:`STYLE_ITALICS`
* :const:`STYLE_BOLD`
* :const:`STYLE_PASS_AGAIN`
* :const:`STYLE_SECOND_PASS`
* :const:`DL_TERM`
* :const:`SAFETY_CHECK`
* :const:`HAS_TEXT`
* :const:`FAIL_ON_TEXT`
* :const:`FAIL_NEXT`
* :const:`FAIL_ON_LBRACE`
* :const:`FAIL_ON_RBRACE`
* :const:`FAIL_ON_EQUALS`
* :const:`HAS_TEMPLATE`
* :const:`TABLE`
* :const:`TABLE_OPEN`
* :const:`TABLE_CELL_OPEN`
* :const:`TABLE_CELL_STYLE`
* :const:`TABLE_TD_LINE`
* :const:`TABLE_TH_LINE`
* :const:`TABLE_CELL_LINE_CONTEXTS`
* :const:`HTML_ENTITY`
Global contexts:
* :const:`GL_HEADING`
Aggregate contexts:
* :const:`FAIL`
* :const:`UNSAFE`
* :const:`DOUBLE`
* :const:`NO_WIKILINKS`
* :const:`NO_EXT_LINKS`
"""
# Local contexts:
TEMPLATE_NAME = 1 << 0
TEMPLATE_PARAM_KEY = 1 << 1
TEMPLATE_PARAM_VALUE = 1 << 2
TEMPLATE = TEMPLATE_NAME + TEMPLATE_PARAM_KEY + TEMPLATE_PARAM_VALUE
ARGUMENT_NAME = 1 << 3
ARGUMENT_DEFAULT = 1 << 4
ARGUMENT = ARGUMENT_NAME + ARGUMENT_DEFAULT
WIKILINK_TITLE = 1 << 5
WIKILINK_TEXT = 1 << 6
WIKILINK = WIKILINK_TITLE + WIKILINK_TEXT
EXT_LINK_URI = 1 << 7
EXT_LINK_TITLE = 1 << 8
EXT_LINK = EXT_LINK_URI + EXT_LINK_TITLE
HEADING_LEVEL_1 = 1 << 9
HEADING_LEVEL_2 = 1 << 10
HEADING_LEVEL_3 = 1 << 11
HEADING_LEVEL_4 = 1 << 12
HEADING_LEVEL_5 = 1 << 13
HEADING_LEVEL_6 = 1 << 14
HEADING = (HEADING_LEVEL_1 + HEADING_LEVEL_2 + HEADING_LEVEL_3 +
HEADING_LEVEL_4 + HEADING_LEVEL_5 + HEADING_LEVEL_6)
TAG_OPEN = 1 << 15
TAG_ATTR = 1 << 16
TAG_BODY = 1 << 17
TAG_CLOSE = 1 << 18
TAG = TAG_OPEN + TAG_ATTR + TAG_BODY + TAG_CLOSE
STYLE_ITALICS = 1 << 19
STYLE_BOLD = 1 << 20
STYLE_PASS_AGAIN = 1 << 21
STYLE_SECOND_PASS = 1 << 22
STYLE = STYLE_ITALICS + STYLE_BOLD + STYLE_PASS_AGAIN + STYLE_SECOND_PASS
DL_TERM = 1 << 23
HAS_TEXT = 1 << 24
FAIL_ON_TEXT = 1 << 25
FAIL_NEXT = 1 << 26
FAIL_ON_LBRACE = 1 << 27
FAIL_ON_RBRACE = 1 << 28
FAIL_ON_EQUALS = 1 << 29
HAS_TEMPLATE = 1 << 30
SAFETY_CHECK = (HAS_TEXT + FAIL_ON_TEXT + FAIL_NEXT + FAIL_ON_LBRACE +
FAIL_ON_RBRACE + FAIL_ON_EQUALS + HAS_TEMPLATE)
TABLE_OPEN = 1 << 31
TABLE_CELL_OPEN = 1 << 32
TABLE_CELL_STYLE = 1 << 33
TABLE_ROW_OPEN = 1 << 34
TABLE_TD_LINE = 1 << 35
TABLE_TH_LINE = 1 << 36
TABLE_CELL_LINE_CONTEXTS = TABLE_TD_LINE + TABLE_TH_LINE + TABLE_CELL_STYLE
TABLE = (TABLE_OPEN + TABLE_CELL_OPEN + TABLE_CELL_STYLE + TABLE_ROW_OPEN +
TABLE_TD_LINE + TABLE_TH_LINE)
HTML_ENTITY = 1 << 37
# Global contexts:
GL_HEADING = 1 << 0
# Aggregate contexts:
FAIL = (TEMPLATE + ARGUMENT + WIKILINK + EXT_LINK_TITLE + HEADING + TAG +
STYLE + TABLE)
UNSAFE = (TEMPLATE_NAME + WIKILINK_TITLE + EXT_LINK_TITLE +
TEMPLATE_PARAM_KEY + ARGUMENT_NAME + TAG_CLOSE)
DOUBLE = TEMPLATE_PARAM_KEY + TAG_CLOSE + TABLE_ROW_OPEN
NO_WIKILINKS = TEMPLATE_NAME + ARGUMENT_NAME + WIKILINK_TITLE + EXT_LINK_URI
NO_EXT_LINKS = TEMPLATE_NAME + ARGUMENT_NAME + WIKILINK_TITLE + EXT_LINK
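# Hedged sketch (added for illustration): the bitwise protocol from the module
# docstring -- OR sets a flag, AND tests it, XOR clears it again.
def _context_demo():
    ctx = 0
    ctx |= TEMPLATE_NAME           # enter a template name
    assert ctx & TEMPLATE          # the aggregate covers its sub-contexts
    ctx ^= TEMPLATE_NAME           # and leave it again
    assert not ctx & FAIL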
| gencer/mwparserfromhell | mwparserfromhell/parser/contexts.py | Python | mit | 5,649 |
# -*- coding: utf-8 -*-
import os
import re
import shutil
import cv2
def getImageFile(srcFolder, dstFolder, pattern):
"""
指定フォルダ内にある正規表現にマッチしたファイルのみを指定フォルダにコピーする
"""
if not os.path.exists(dstFolder):
os.makedirs(dstFolder)
for fileName in os.listdir(srcFolder):
matchOB = re.match(pattern, fileName)
if matchOB:
srcFilePath = srcFolder + "/" + fileName
dstFilePath = dstFolder + "/" + matchOB.group().replace('_co', '').replace('_ir', '').replace('.png','.jpg')
#shutil.copyfile(srcFilePath, dstFilePath)
cv2.imwrite(dstFilePath, cv2.imread(srcFilePath))
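# Hedged usage sketch (added; the paths and pattern are hypothetical):
#   getImageFile('./kinect_raw', './kinect_jpg', r'.+_co\.png')
# copies each matching PNG, dropping the _co/_ir suffix and re-encoding as JPEG.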
if __name__ == "__main__":
    pass
| yizumi1012xxx/research-rcnn | cnn/cnn/src/util/image.py | Python | mit | 769 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
version_info = pbr.version.VersionInfo('vitrage_templates_ui')
| openstack/vitrage-dashboard | vitrage_dashboard/templates/version.py | Python | apache-2.0 | 656 |
from py_particle_processor_qt.py_particle_processor_qt import *
| DanielWinklehner/py_particle_processor | py_particle_processor_qt/__init__.py | Python | mit | 64 |
#
# The Python Imaging Library.
# $Id$
#
# PCX file handling
#
# This format was originally used by ZSoft's popular PaintBrush
# program for the IBM PC. It is also supported by many MS-DOS and
# Windows applications, including the Windows PaintBrush program in
# Windows 3.
#
# history:
# 1995-09-01 fl Created
# 1996-05-20 fl Fixed RGB support
# 1997-01-03 fl Fixed 2-bit and 4-bit support
# 1999-02-03 fl Fixed 8-bit support (broken in 1.0b1)
# 1999-02-07 fl Added write support
# 2002-06-09 fl Made 2-bit and 4-bit support a bit more robust
# 2002-07-30 fl   Seek from current position, not beginning of file
# 2003-06-03 fl Extract DPI settings (info["dpi"])
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.6"
import Image, ImageFile, ImagePalette
def i16(c,o):
return ord(c[o]) + (ord(c[o+1])<<8)
def _accept(prefix):
return ord(prefix[0]) == 10 and ord(prefix[1]) in [0, 2, 3, 5]
##
# Image plugin for Paintbrush images.
class PcxImageFile(ImageFile.ImageFile):
format = "PCX"
format_description = "Paintbrush"
def _open(self):
# header
s = self.fp.read(128)
if not _accept(s):
raise SyntaxError, "not a PCX file"
# image
bbox = i16(s,4), i16(s,6), i16(s,8)+1, i16(s,10)+1
if bbox[2] <= bbox[0] or bbox[3] <= bbox[1]:
raise SyntaxError, "bad PCX image size"
# format
version = ord(s[1])
bits = ord(s[3])
planes = ord(s[65])
stride = i16(s,66)
self.info["dpi"] = i16(s,12), i16(s,14)
if bits == 1 and planes == 1:
mode = rawmode = "1"
elif bits == 1 and planes in (2, 4):
mode = "P"
rawmode = "P;%dL" % planes
self.palette = ImagePalette.raw("RGB", s[16:64])
elif version == 5 and bits == 8 and planes == 1:
mode = rawmode = "L"
# FIXME: hey, this doesn't work with the incremental loader !!!
self.fp.seek(-769, 2)
s = self.fp.read(769)
if len(s) == 769 and ord(s[0]) == 12:
# check if the palette is linear greyscale
for i in range(256):
if s[i*3+1:i*3+4] != chr(i)*3:
mode = rawmode = "P"
break
if mode == "P":
self.palette = ImagePalette.raw("RGB", s[1:])
self.fp.seek(128)
elif version == 5 and bits == 8 and planes == 3:
mode = "RGB"
rawmode = "RGB;L"
else:
raise IOError, "unknown PCX mode"
self.mode = mode
self.size = bbox[2]-bbox[0], bbox[3]-bbox[1]
bbox = (0, 0) + self.size
self.tile = [("pcx", bbox, self.fp.tell(), (rawmode, planes * stride))]
# --------------------------------------------------------------------
# save PCX files
SAVE = {
# mode: (version, bits, planes, raw mode)
"1": (2, 1, 1, "1"),
"L": (5, 8, 1, "L"),
"P": (5, 8, 1, "P"),
"RGB": (5, 8, 3, "RGB;L"),
}
def o16(i):
return chr(i&255) + chr(i>>8&255)
def _save(im, fp, filename, check=0):
try:
version, bits, planes, rawmode = SAVE[im.mode]
except KeyError:
raise ValueError, "Cannot save %s images as PCX" % im.mode
if check:
return check
# bytes per plane
stride = (im.size[0] * bits + 7) / 8
# under windows, we could determine the current screen size with
# "Image.core.display_mode()[1]", but I think that's overkill...
screen = im.size
dpi = 100, 100
# PCX header
fp.write(
chr(10) + chr(version) + chr(1) + chr(bits) + o16(0) +
o16(0) + o16(im.size[0]-1) + o16(im.size[1]-1) + o16(dpi[0]) +
o16(dpi[1]) + chr(0)*24 + chr(255)*24 + chr(0) + chr(planes) +
o16(stride) + o16(1) + o16(screen[0]) + o16(screen[1]) +
chr(0)*54
)
assert fp.tell() == 128
ImageFile._save(im, fp, [("pcx", (0,0)+im.size, 0,
(rawmode, bits*planes))])
if im.mode == "P":
# colour palette
fp.write(chr(12))
fp.write(im.im.getpalette("RGB", "RGB")) # 768 bytes
elif im.mode == "L":
# greyscale palette
fp.write(chr(12))
for i in range(256):
fp.write(chr(i)*3)
# --------------------------------------------------------------------
# registry
Image.register_open("PCX", PcxImageFile, _accept)
Image.register_save("PCX", _save)
Image.register_extension("PCX", ".pcx")
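# Usage sketch (file names are hypothetical): with the plugin registered
# above, PCX files go through the ordinary Image API:
#   im = Image.open("shot.pcx")
#   im.save("copy.pcx")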
| robiame/AndroidGeodata | pil/PcxImagePlugin.py | Python | mit | 4,834 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This file is part of MoaT, the Master of all Things.
##
## MoaT is Copyright © 2007-2016 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.rst` for details,
## including optimistic statements by the author.
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License (included; see the file LICENSE)
## for more details.
##
## This header is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘scripts/_boilerplate.py’.
## Thus, do not remove the next line, or insert any blank lines above.
##BP
from django.views.generic import ListView,DetailView,CreateView,UpdateView,DeleteView
from django.forms import ModelForm
from rainman.models import Level,Valve
from irrigator.views import FormMixin,SiteParamMixin,get_profile
from rainman.utils import now
class LevelForm(ModelForm):
class Meta:
model = Level
exclude = ('forced','time','valve')
def save(self,commit=True):
if self.instance.id is None:
self.instance.time = now()
self.instance.valve = self.aux_data['valve']
self.instance.forced = True
return super(LevelForm,self).save(commit)
class LevelMixin(FormMixin):
model = Level
context_object_name = "level"
def get_queryset(self):
gu = get_profile(self.request)
return super(LevelMixin,self).get_queryset().filter(valve__in=gu.all_valves).order_by('-time')
class LevelsView(LevelMixin,SiteParamMixin,ListView):
context_object_name = "level_list"
opt_params = {'valve':Valve}
paginate_by=50
class LevelView(LevelMixin,DetailView):
def get_context_data(self,**k):
ctx = super(LevelView,self).get_context_data(**k)
try:
ctx['next_lv'] = self.get_queryset().filter(time__gt=ctx['level'].time).order_by('time')[0]
except IndexError:
ctx['next_lv'] = None
try:
ctx['prev_lv'] = self.get_queryset().filter(time__lt=ctx['level'].time).order_by('-time')[0]
except IndexError:
ctx['prev_lv'] = None
return ctx
class LevelNewView(LevelMixin,SiteParamMixin,CreateView):
form_class = LevelForm
success_url="/level/%(id)s"
opt_params = {'valve':Valve}
class LevelEditView(LevelMixin,UpdateView):
form_class = LevelForm
success_url="/level/%(id)s"
| smurfix/MoaT | irrigation/irrigator/views/level.py | Python | gpl-3.0 | 2,763 |
#0 list of animals
dom_zvirata = ['dog', 'cat', 'rabbit', 'snake']
#4 pet names
moji_mazlicci = ['Chula', 'Gordo', 'Bondy', 'Alan', 'Sancho']
vasi_mazlicci = ['Chula', 'Gordo', 'Brok', 'Alfonso', 'Silák']
spolecni_mazlicci = []
jen_moji = []
jen_vasi = []
# one membership test per name, so no pet is added to a list twice
for jmeno in moji_mazlicci:
    if jmeno in vasi_mazlicci:
        spolecni_mazlicci.append(jmeno)
    else:
        jen_moji.append(jmeno)
for jmeno in vasi_mazlicci:
    if jmeno not in moji_mazlicci:
        jen_vasi.append(jmeno)
print("Pets we share:", spolecni_mazlicci)
print("These pets are only yours:", jen_vasi)
print("These pets are only mine:", jen_moji)
#5. function that sorts the pets alphabetically
def seradi_abeceda(seznam):
    """Sorts the given list alphabetically"""
    print(sorted(seznam))
seradi_abeceda(dom_zvirata)
#6. a budgie has arrived
dom_zvirata.append('budgie')
print(dom_zvirata)
# sort the animals by their name without its first letter,
# using [tail, full name] pairs
seznam_dvojic = []
dom_zvirata_nove = []
for zvire in dom_zvirata:
    seznam_dvojic.append([zvire[1:], zvire])
print(sorted(seznam_dvojic))
for podseznamy in sorted(seznam_dvojic):
    dom_zvirata_nove.append(podseznamy[1])
print(dom_zvirata_nove)
| Zuzanita/PyZuzu | DP_07_seznamy/Jmena_mazliku.py | Python | mit | 1,217 |
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.Model import Model
from scap.model.oval_5.sc.ItemType import ItemType
logger = logging.getLogger(__name__)
class PartitionItemElement(ItemType):
MODEL_MAP = {
'elements': [
{'tag_name': 'mount_point', 'class': 'scap.model.oval_5.sc.EntityItemType', 'min': 0, 'max': 1},
{'tag_name': 'device', 'class': 'scap.model.oval_5.sc.EntityItemType', 'min': 0, 'max': 1},
{'tag_name': 'uuid', 'class': 'scap.model.oval_5.sc.EntityItemType', 'min': 0, 'max': 1},
{'tag_name': 'fs_type', 'class': 'scap.model.oval_5.sc.EntityItemType', 'min': 0, 'max': 1},
{'tag_name': 'mount_options', 'class': 'scap.model.oval_5.sc.EntityItemType', 'min': 0, 'max': 1},
{'tag_name': 'total_space', 'class': 'scap.model.oval_5.sc.EntityItemType', 'min': 0, 'max': 1},
{'tag_name': 'space_used', 'class': 'scap.model.oval_5.sc.EntityItemType', 'min': 0, 'max': 1},
{'tag_name': 'space_left', 'class': 'scap.model.oval_5.sc.EntityItemType', 'min': 0, 'max': 1},
],
}
| cjaymes/pyscap | src/scap/model/oval_5/sc/linux/PartitionItemElement.py | Python | gpl-3.0 | 1,766 |
# Python test set -- part 5, built-in exceptions
from test_support import *
from types import ClassType
print '5. Built-in exceptions'
# XXX This is not really enough, each *operation* should be tested!
def test_raise_catch(exc):
try:
raise exc, "spam"
except exc, err:
buf = str(err)
try:
raise exc("spam")
except exc, err:
buf = str(err)
print buf
def r(thing):
test_raise_catch(thing)
if isinstance(thing, ClassType):
print thing.__name__
else:
print thing
r(AttributeError)
import sys
try: x = sys.undefined_attribute
except AttributeError: pass
r(EOFError)
import sys
fp = open(TESTFN, 'w')
fp.close()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
try:
try:
sys.stdin = fp
x = raw_input()
except EOFError:
pass
finally:
sys.stdin = savestdin
fp.close()
r(IOError)
try: open('this file does not exist', 'r')
except IOError: pass
r(ImportError)
try: import undefined_module
except ImportError: pass
r(IndexError)
x = []
try: a = x[10]
except IndexError: pass
r(KeyError)
x = {}
try: a = x['key']
except KeyError: pass
r(KeyboardInterrupt)
print '(not testable in a script)'
r(MemoryError)
print '(not safe to test)'
r(NameError)
try: x = undefined_variable
except NameError: pass
r(OverflowError)
x = 1
try:
while 1: x = x+x
except OverflowError: pass
r(RuntimeError)
print '(not used any more?)'
r(SyntaxError)
try: exec '/\n'
except SyntaxError: pass
# make sure the right exception message is raised for each of these
# code fragments:
def ckmsg(src, msg):
try:
compile(src, '<fragment>', 'exec')
except SyntaxError, e:
print e.msg
if e.msg == msg:
print "ok"
else:
print "expected:", msg
else:
print "failed to get expected SyntaxError"
s = '''\
while 1:
try:
pass
finally:
continue
'''
ckmsg(s, "'continue' not supported inside 'finally' clause")
s = '''\
try:
continue
except:
pass
'''
ckmsg(s, "'continue' not properly in loop")
ckmsg("continue\n", "'continue' not properly in loop")
r(IndentationError)
r(TabError)
# can only be tested under -tt, and is the only test for -tt
#try: compile("try:\n\t1/0\n \t1/0\nfinally:\n pass\n", '<string>', 'exec')
#except TabError: pass
#else: raise TestFailed
r(SystemError)
print '(hard to reproduce)'
r(SystemExit)
import sys
try: sys.exit(0)
except SystemExit: pass
r(TypeError)
try: [] + ()
except TypeError: pass
r(ValueError)
try: x = chr(10000)
except ValueError: pass
r(ZeroDivisionError)
try: x = 1/0
except ZeroDivisionError: pass
r(Exception)
try: x = 1/0
except Exception, e: pass
unlink(TESTFN)
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.1/Lib/test/test_exceptions.py | Python | mit | 2,720 |
from Screens.Screen import Screen
from Screens.Dish import Dishpip
from enigma import ePoint, eSize, eRect, eServiceCenter, getBestPlayableServiceReference, eServiceReference, eTimer
from Components.SystemInfo import SystemInfo
from Components.VideoWindow import VideoWindow
from Components.config import config, ConfigPosition, ConfigYesNo, ConfigSelection
from Tools import Notifications
from Screens.MessageBox import MessageBox
from os import access, W_OK
MAX_X = 720
MAX_Y = 576
pip_config_initialized = False
PipPigModeEnabled = False
PipPigModeTimer = eTimer()
def timedStopPipPigMode():
from Screens.InfoBar import InfoBar
if InfoBar.instance and InfoBar.instance.session:
if SystemInfo["hasPIPVisibleProc"]:
open(SystemInfo["hasPIPVisibleProc"], "w").write("1")
elif hasattr(InfoBar.instance.session, "pip"):
InfoBar.instance.session.pip.playService(InfoBar.instance.session.pip.currentService)
global PipPigModeEnabled
PipPigModeEnabled = False
PipPigModeTimer.callback.append(timedStopPipPigMode)
def PipPigMode(value):
from Screens.InfoBar import InfoBar
if InfoBar.instance and InfoBar.instance.session and hasattr(InfoBar.instance.session, "pip") and config.av.pip_mode.value != "external":
if value:
PipPigModeTimer.stop()
global PipPigModeEnabled
if not PipPigModeEnabled:
if SystemInfo["hasPIPVisibleProc"]:
open(SystemInfo["hasPIPVisibleProc"], "w").write("0")
else:
InfoBar.instance.session.pip.pipservice = False
PipPigModeEnabled = True
else:
PipPigModeTimer.start(100, True)
class PictureInPictureZapping(Screen):
skin = """<screen name="PictureInPictureZapping" flags="wfNoBorder" position="50,50" size="90,26" title="PiPZap" zPosition="-1">
<eLabel text="PiP-Zap" position="0,0" size="90,26" foregroundColor="#00ff66" font="Regular;26" />
</screen>"""
class PictureInPicture(Screen):
def __init__(self, session):
global pip_config_initialized
Screen.__init__(self, session)
self["video"] = VideoWindow()
self.pipActive = session.instantiateDialog(PictureInPictureZapping)
self.dishpipActive = session.instantiateDialog(Dishpip)
self.currentService = None
self.currentServiceReference = None
self.choicelist = [("standard", _("Standard"))]
if SystemInfo["VideoDestinationConfigurable"]:
self.choicelist.append(("cascade", _("Cascade PiP")))
self.choicelist.append(("split", _("Splitscreen")))
self.choicelist.append(("byside", _("Side by side")))
self.choicelist.append(("bigpig", _("Big PiP")))
if SystemInfo["HasExternalPIP"]:
self.choicelist.append(("external", _("External PiP")))
if not pip_config_initialized:
config.av.pip = ConfigPosition(default=[510, 28, 180, 135], args = (MAX_X, MAX_Y, MAX_X, MAX_Y))
config.av.pip_mode = ConfigSelection(default="standard", choices=self.choicelist)
pip_config_initialized = True
self.onLayoutFinish.append(self.LayoutFinished)
def __del__(self):
del self.pipservice
self.setExternalPiP(False)
self.setSizePosMainWindow()
if hasattr(self, "dishpipActive") and self.dishpipActive is not None:
self.dishpipActive.setHide()
def relocate(self):
x = config.av.pip.value[0]
y = config.av.pip.value[1]
w = config.av.pip.value[2]
h = config.av.pip.value[3]
self.move(x, y)
self.resize(w, h)
def LayoutFinished(self):
self.onLayoutFinish.remove(self.LayoutFinished)
self.relocate()
self.setExternalPiP(config.av.pip_mode.value == "external")
def move(self, x, y):
if config.av.pip_mode.value == 2:
self.instance.move(ePoint(370, 152))
return
w = config.av.pip.value[2]
if config.av.pip_mode.value == 1:
x = 720 - w
y = 0
config.av.pip.value[0] = x
config.av.pip.value[1] = y
w = config.av.pip.value[2]
h = config.av.pip.value[3]
if config.av.pip_mode.value == "cascade":
x = MAX_X - w
y = 0
elif config.av.pip_mode.value == "split":
x = MAX_X / 2
y = 0
elif config.av.pip_mode.value == "byside":
x = MAX_X / 2
y = MAX_Y / 4
elif config.av.pip_mode.value in "bigpig external":
x = 0
y = 0
config.av.pip.save()
self.instance.move(ePoint(x, y))
def resize(self, w, h):
if config.av.pip_mode.value == 2:
self.instance.resize(eSize(*(340, 264)))
self["video"].instance.resize(eSize(*(340, 264)))
self.setSizePosMainWindow(0, 142, 360, 284)
return
config.av.pip.value[2] = w
config.av.pip.value[3] = h
config.av.pip.save()
if config.av.pip_mode.value == "standard":
self.instance.resize(eSize(*(w, h)))
self["video"].instance.resize(eSize(*(w, h)))
self.setSizePosMainWindow()
elif config.av.pip_mode.value == "cascade":
self.instance.resize(eSize(*(w, h)))
self["video"].instance.resize(eSize(*(w, h)))
self.setSizePosMainWindow(0, h, MAX_X - w, MAX_Y - h)
elif config.av.pip_mode.value == "split":
self.instance.resize(eSize(*(MAX_X/2, MAX_Y )))
self["video"].instance.resize(eSize(*(MAX_X/2, MAX_Y)))
self.setSizePosMainWindow(0, 0, MAX_X/2, MAX_Y)
elif config.av.pip_mode.value == "byside":
self.instance.resize(eSize(*(MAX_X/2, MAX_Y/2 )))
self["video"].instance.resize(eSize(*(MAX_X/2, MAX_Y/2)))
self.setSizePosMainWindow(0, MAX_Y/4, MAX_X/2, MAX_Y/2)
elif config.av.pip_mode.value in "bigpig external":
self.instance.resize(eSize(*(MAX_X, MAX_Y)))
self["video"].instance.resize(eSize(*(MAX_X, MAX_Y)))
self.setSizePosMainWindow()
def setSizePosMainWindow(self, x = 0, y = 0, w = 0, h = 0):
if SystemInfo["VideoDestinationConfigurable"]:
self["video"].instance.setFullScreenPosition(eRect(x, y, w, h))
def setExternalPiP(self, onoff):
if SystemInfo["HasExternalPIP"]:
open(SystemInfo["HasExternalPIP"], "w").write(onoff and "on" or "off")
def active(self):
self.pipActive.show()
def inactive(self):
self.pipActive.hide()
def getPosition(self):
return self.instance.position().x(), self.instance.position().y()
def getSize(self):
return self.instance.size().width(), self.instance.size().height()
def togglePiPMode(self):
self.setMode(config.av.pip_mode.choices[(config.av.pip_mode.index + 1) % len(config.av.pip_mode.choices)])
def setMode(self, mode):
config.av.pip_mode.value = mode
config.av.pip_mode.save()
self.setExternalPiP(config.av.pip_mode.value == "external")
self.relocate()
def getMode(self):
return config.av.pip_mode.value
def getModeName(self):
return self.choicelist[config.av.pip_mode.index][1]
def playService(self, service):
if service is None:
return False
ref = self.resolveAlternatePipService(service)
if ref:
if self.isPlayableForPipService(ref):
print "playing pip service", ref and ref.toString()
else:
if not config.usage.hide_zap_errors.value:
Notifications.AddPopup(text = _("No free tuner!"), type = MessageBox.TYPE_ERROR, timeout = 5, id = "ZapPipError")
return False
self.pipservice = eServiceCenter.getInstance().play(ref)
if self.pipservice and not self.pipservice.setTarget(1):
if hasattr(self, "dishpipActive") and self.dishpipActive is not None:
self.dishpipActive.startPiPService(ref)
self.pipservice.start()
self.currentService = service
self.currentServiceReference = ref
return True
else:
self.pipservice = None
self.currentService = None
self.currentServiceReference = None
if not config.usage.hide_zap_errors.value:
Notifications.AddPopup(text = _("Incorrect type service for PiP!"), type = MessageBox.TYPE_ERROR, timeout = 5, id = "ZapPipError")
return False
def getCurrentService(self):
return self.currentService
def getCurrentServiceReference(self):
return self.currentServiceReference
def isPlayableForPipService(self, service):
playingref = self.session.nav.getCurrentlyPlayingServiceReference()
if playingref is None or service == playingref:
return True
info = eServiceCenter.getInstance().info(service)
oldref = self.currentServiceReference or eServiceReference()
if info and info.isPlayable(service, oldref):
return True
return False
def resolveAlternatePipService(self, service):
if service and (service.flags & eServiceReference.isGroup):
oldref = self.currentServiceReference or eServiceReference()
return getBestPlayableServiceReference(service, oldref)
return service
| XTAv2/Enigma2 | lib/python/Screens/PictureInPicture.py | Python | gpl-2.0 | 8,262 |
#!/usr/bin/env python
#-
# Copyright (c) 2010 Gleb Kurtsou
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD: soc2013/dpl/head/tools/tools/shlib-compat/shlib-compat.py 249894 2013-03-25 00:31:14Z gleb $
import os
import sys
import re
import optparse
class Config(object):
version = '0.1'
# controlled by user
verbose = 0
dump = False
no_dump = False
version_filter = None
symbol_filter = None
alias_prefixes = []
# misc opts
objdump = 'objdump'
dwarfdump = 'dwarfdump'
# debug
cmpcache_enabled = True
dwarfcache_enabled = True
w_alias = True
w_cached = False
w_symbol = True
class FileConfig(object):
filename = None
out = sys.stdout
def init(self, outname):
if outname and outname != '-':
self.out = open(outname, "w")
origfile = FileConfig()
newfile = FileConfig()
@classmethod
def init(cls):
cls.version_filter = StrFilter()
cls.symbol_filter = StrFilter()
class App(object):
result_code = 0
def warn(cond, msg):
if cond:
print >> sys.stderr, "WARN: " + msg
# {{{ misc
class StrFilter(object):
def __init__(self):
self.exclude = []
self.include = []
def compile(self):
self.re_exclude = [ re.compile(x) for x in self.exclude ]
self.re_include = [ re.compile(x) for x in self.include ]
def match(self, s):
if len(self.re_include):
matched = False
for r in self.re_include:
if r.match(s):
matched = True
break
if not matched:
return False
for r in self.re_exclude:
if r.match(s):
return False
return True
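# Usage sketch (patterns are illustrative): keep FBSD_1.* version names
# except FBSD_1.0:
#   f = StrFilter()
#   f.include.append(r'FBSD_1\.')
#   f.exclude.append(r'FBSD_1\.0$')
#   f.compile()
#   f.match('FBSD_1.3')   # -> True
#   f.match('FBSD_1.0')   # -> False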
class Cache(object):
class CacheStats(object):
def __init__(self):
self.hit = 0
self.miss = 0
def show(self, name):
total = self.hit + self.miss
if total == 0:
ratio = '(undef)'
else:
ratio = '%f' % (self.hit/float(total))
return '%s cache stats: hit: %d; miss: %d; ratio: %s' % \
(name, self.hit, self.miss, ratio)
def __init__(self, enabled=True, stats=None):
self.enabled = enabled
self.items = {}
if stats == None:
self.stats = Cache.CacheStats()
else:
self.stats = stats
def get(self, id):
if self.enabled and self.items.has_key(id):
self.stats.hit += 1
return self.items[id]
else:
self.stats.miss += 1
return None
def put(self, id, obj):
if self.enabled:
if self.items.has_key(id) and obj is not self.items[id]:
#raise ValueError("Item is already cached: %d (%s, %s)" %
# (id, self.items[id], obj))
warn(Config.w_cached, "Item is already cached: %d (%s, %s)" % \
(id, self.items[id], obj))
self.items[id] = obj
def replace(self, id, obj):
if self.enabled:
assert self.items.has_key(id)
self.items[id] = obj
class ListDiff(object):
def __init__(self, orig, new):
self.orig = set(orig)
self.new = set(new)
self.common = self.orig & self.new
self.added = self.new - self.common
self.removed = self.orig - self.common
class PrettyPrinter(object):
def __init__(self):
self.stack = []
def run_nested(self, obj):
ex = obj._pp_ex(self)
self.stack.append(ex)
def run(self, obj):
self._result = obj._pp(self)
return self._result
def nested(self):
return sorted(set(self.stack))
def result(self):
return self._result;
# }}}
#{{{ symbols and version maps
class Symbol(object):
def __init__(self, name, offset, version, lib):
self.name = name
self.offset = offset
self.version = version
self.lib = lib
self.definition = None
@property
def name_ver(self):
return self.name + '@' + self.version
def __repr__(self):
return "Symbol(%s, 0x%x, %s)" % (self.name, self.offset, self.version)
class CommonSymbol(object):
def __init__(self, origsym, newsym):
if origsym.name != newsym.name or origsym.version != newsym.version:
raise RuntimeError("Symbols have different names: %s",
[origsym, newsym])
self.origsym = origsym
self.newsym = newsym
self.name = newsym.name
self.version = newsym.version
def __repr__(self):
return "CommonSymbol(%s, %s)" % (self.name, self.version)
class SymbolAlias(object):
def __init__(self, alias, prefix, offset):
assert alias.startswith(prefix)
self.alias = alias
self.name = alias[len(prefix):]
self.offset = offset
def __repr__(self):
return "SymbolAlias(%s, 0x%x)" % (self.alias, self.offset)
class VersionMap(object):
def __init__(self, name):
self.name = name
self.symbols = {}
def append(self, symbol):
if (self.symbols.has_key(symbol.name)):
raise ValueError("Symbol is already defined %s@%s" %
(symbol.name, self.name))
self.symbols[symbol.name] = symbol
def names(self):
return self.symbols.keys()
def __repr__(self):
return repr(self.symbols.values())
# }}}
# {{{ types and definitions
class Def(object):
_is_alias = False
def __init__(self, id, name, **kwargs):
self.id = id
self.name = name
self.attrs = kwargs
def __getattr__(self, attr):
if not self.attrs.has_key(attr):
raise AttributeError('%s in %s' % (attr, str(self)))
return self.attrs[attr]
def _name_opt(self, default=''):
if not self.name:
return default
return self.name
def _alias(self):
if self._is_alias:
return self.type._alias()
return self
def __cmp__(self, other):
# TODO assert 'self' and 'other' belong to different libraries
#print 'cmp defs: %s, %s' % (self, other)
a = self._alias()
try:
b = other._alias()
except AttributeError:
return 1
r = cmp(a.__class__, b.__class__)
if r == 0:
if a.id != 0 and b.id != 0:
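                # pack both tag ids into a single 64-bit cache key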
ind = (long(a.id) << 32) + b.id
r = Dwarf.cmpcache.get(ind)
if r != None:
return r
else:
ind = 0
r = cmp(a.attrs, b.attrs)
if ind != 0:
Dwarf.cmpcache.put(ind, r)
else:
r = 0
#raise RuntimeError('Comparing different classes: %s, %s' %
# (a.__class__.__name__, b.__class__.__name__))
return r
def __repr__(self):
p = []
if hasattr(self, 'name'):
p.append("name=%s" % self.name)
for (k, v) in self.attrs.items():
if isinstance(v, Def):
v = v.__class__.__name__ + '(...)'
p.append("%s=%s" % (k, v))
return self.__class__.__name__ + '(' + ', '.join(p) + ')'
def _mapval(self, param, vals):
if param not in vals.keys():
raise NotImplementedError("Invalid value '%s': %s" %
(param, str(self)))
return vals[param]
def _pp_ex(self, pp):
        raise NotImplementedError('Extended pretty print not implemented: %s' %
str(self))
def _pp(self, pp):
        raise NotImplementedError('Pretty print not implemented: %s' % str(self))
class AnonymousDef(Def):
def __init__(self, id, **kwargs):
Def.__init__(self, id, None, **kwargs)
class Void(AnonymousDef):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Void, cls).__new__(
cls, *args, **kwargs)
return cls._instance
def __init__(self):
AnonymousDef.__init__(self, 0)
def _pp(self, pp):
return "void"
class VarArgs(AnonymousDef):
def _pp(self, pp):
return "..."
class PointerDef(AnonymousDef):
def _pp(self, pp):
t = pp.run(self.type)
return "%s*" % (t,)
class BaseTypeDef(Def):
inttypes = ['DW_ATE_signed', 'DW_ATE_unsigned', 'DW_ATE_unsigned_char']
def _pp(self, pp):
if self.encoding in self.inttypes:
sign = '' if self.encoding == 'DW_ATE_signed' else 'u'
bits = int(self.byte_size) * 8
return '%sint%s_t' % (sign, bits)
elif self.encoding == 'DW_ATE_signed_char' and int(self.byte_size) == 1:
return 'char';
elif self.encoding == 'DW_ATE_float':
return self._mapval(self.byte_size, {
'16': 'long double',
'8': 'double',
'4': 'float',
})
raise NotImplementedError('Invalid encoding: %s' % self)
class TypeAliasDef(Def):
_is_alias = True
def _pp(self, pp):
alias = self._alias()
# push typedef name
if self.name and not alias.name:
alias.name = 'T(%s)' % self.name
# return type with modifiers
return self.type._pp(pp)
class EnumerationTypeDef(Def):
def _pp(self, pp):
return 'enum ' + self._name_opt('UNKNOWN')
class ConstTypeDef(AnonymousDef):
_is_alias = True
def _pp(self, pp):
return 'const ' + self.type._pp(pp)
class VolatileTypeDef(AnonymousDef):
_is_alias = True
def _pp(self, pp):
return 'volatile ' + self.type._pp(pp)
class ArrayDef(AnonymousDef):
def _pp(self, pp):
t = pp.run(self.type)
assert len(self.subranges) == 1
try:
sz = int(self.subranges[0].upper_bound) + 1
except ValueError:
s = re.sub(r'\(.+\)', '', self.subranges[0].upper_bound)
sz = int(s) + 1
return '%s[%s]' % (t, sz)
class ArraySubrangeDef(AnonymousDef):
pass
class FunctionDef(Def):
def _pp(self, pp):
result = pp.run(self.result)
if not self.params:
params = "void"
else:
params = ', '.join([ pp.run(x) for x in self.params ])
return "%s %s(%s);" % (result, self.name, params)
class FunctionTypeDef(Def):
def _pp(self, pp):
result = pp.run(self.result)
if not self.params:
params = "void"
else:
params = ', '.join([ pp.run(x) for x in self.params ])
return "F(%s, %s, (%s))" % (self._name_opt(), result, params)
class ParameterDef(Def):
def _pp(self, pp):
t = pp.run(self.type)
return "%s %s" % (t, self._name_opt())
# TODO
class StructForwardDef(Def):
pass
class IncompleteDef(Def):
def update(self, complete, cache=None):
self.complete = complete
complete.incomplete = self
if cache != None:
cached = cache.get(self.id)
if cached != None and isinstance(cached, IncompleteDef):
cache.replace(self.id, complete)
class StructIncompleteDef(IncompleteDef):
def _pp(self, pp):
return "struct %s" % (self.name,)
class UnionIncompleteDef(IncompleteDef):
def _pp(self, pp):
return "union %s" % (self.name,)
class StructDef(Def):
def _pp_ex(self, pp, suffix=';'):
members = [ pp.run(x) for x in self.members ]
return "struct %s { %s }%s" % \
(self._name_opt(), ' '.join(members), suffix)
def _pp(self, pp):
if self.name:
pp.run_nested(self)
return "struct %s" % (self.name,)
else:
return self._pp_ex(pp, suffix='')
class UnionDef(Def):
def _pp_ex(self, pp, suffix=';'):
members = [ pp.run(x) for x in self.members ]
return "union %s { %s }%s" % \
(self._name_opt(), ' '.join(members), suffix)
def _pp(self, pp):
if self.name:
pp.run_nested(self)
return "union %s" % (self.name,)
else:
return self._pp_ex(pp, suffix='')
class MemberDef(Def):
def _pp(self, pp):
t = pp.run(self.type)
if self.bit_size:
bits = ":%s" % self.bit_size
else:
bits = ""
return "%s %s%s;" % (t, self._name_opt(), bits)
class Dwarf(object):
cmpcache = Cache(enabled=Config.cmpcache_enabled)
def __init__(self, dump):
self.dump = dump
def _build_optarg_type(self, praw):
type = praw.optarg('type', Void())
if type != Void():
type = self.buildref(praw.unit, type)
return type
def build_subprogram(self, raw):
if raw.optname == None:
raw.setname('SUBPROGRAM_NONAME_' + raw.arg('low_pc'));
params = [ self.build(x) for x in raw.nested ]
result = self._build_optarg_type(raw)
return FunctionDef(raw.id, raw.name, params=params, result=result)
def build_subroutine_type(self, raw):
params = [ self.build(x) for x in raw.nested ]
result = self._build_optarg_type(raw)
return FunctionTypeDef(raw.id, raw.optname, params=params, result=result)
def build_formal_parameter(self, raw):
type = self._build_optarg_type(raw)
return ParameterDef(raw.id, raw.optname, type=type)
def build_pointer_type(self, raw):
type = self._build_optarg_type(raw)
return PointerDef(raw.id, type=type)
def build_member(self, raw):
type = self.buildref(raw.unit, raw.arg('type'))
return MemberDef(raw.id, raw.name, type=type,
bit_size=raw.optarg('bit_size', None))
def build_structure_type(self, raw):
incomplete = raw.unit.incomplete.get(raw.id)
if incomplete == None:
incomplete = StructIncompleteDef(raw.id, raw.optname)
raw.unit.incomplete.put(raw.id, incomplete)
else:
return incomplete
members = [ self.build(x) for x in raw.nested ]
byte_size = raw.optarg('byte_size', None)
if byte_size == None:
obj = StructForwardDef(raw.id, raw.name, members=members,
forcename=raw.name)
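        # note that the complete StructDef below supersedes the forward def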
obj = StructDef(raw.id, raw.optname, members=members,
byte_size=byte_size)
incomplete.update(obj, cache=raw.unit.cache)
return obj
def build_union_type(self, raw):
incomplete = raw.unit.incomplete.get(raw.id)
if incomplete == None:
incomplete = UnionIncompleteDef(raw.id, raw.optname)
raw.unit.incomplete.put(raw.id, incomplete)
else:
return incomplete
members = [ self.build(x) for x in raw.nested ]
byte_size = raw.optarg('byte_size', None)
obj = UnionDef(raw.id, raw.optname, members=members,
byte_size=byte_size)
obj.incomplete = incomplete
incomplete.complete = obj
return obj
def build_typedef(self, raw):
type = self._build_optarg_type(raw)
return TypeAliasDef(raw.id, raw.name, type=type)
def build_const_type(self, raw):
type = self._build_optarg_type(raw)
return ConstTypeDef(raw.id, type=type)
def build_volatile_type(self, raw):
type = self._build_optarg_type(raw)
return VolatileTypeDef(raw.id, type=type)
def build_enumeration_type(self, raw):
# TODO handle DW_TAG_enumerator ???
return EnumerationTypeDef(raw.id, name=raw.optname,
byte_size=raw.arg('byte_size'))
def build_base_type(self, raw):
return BaseTypeDef(raw.id, raw.optname,
byte_size=raw.arg('byte_size'), encoding=raw.arg('encoding'))
def build_array_type(self, raw):
type = self.buildref(raw.unit, raw.arg('type'))
subranges = [ self.build(x) for x in raw.nested ]
return ArrayDef(raw.id, type=type, subranges=subranges)
def build_subrange_type(self, raw):
type = self.buildref(raw.unit, raw.arg('type'))
return ArraySubrangeDef(raw.id, type=type,
upper_bound=raw.optarg('upper_bound', 0))
def build_unspecified_parameters(self, raw):
return VarArgs(raw.id)
def _get_id(self, id):
try:
return int(id)
except ValueError:
if (id.startswith('<') and id.endswith('>')):
return int(id[1:-1])
else:
raise ValueError("Invalid dwarf id: %s" % id)
def build(self, raw):
obj = raw.unit.cache.get(raw.id)
if obj != None:
return obj
builder_name = raw.tag.replace('DW_TAG_', 'build_')
try:
builder = getattr(self, builder_name)
except AttributeError:
raise AttributeError("Unknown dwarf tag: %s" % raw)
obj = builder(raw)
raw.unit.cache.put(obj.id, obj)
return obj
def buildref(self, unit, id):
id = self._get_id(id)
raw = unit.tags[id]
obj = self.build(raw)
return obj
# }}}
class Shlib(object):
def __init__(self, libfile):
self.libfile = libfile
self.versions = {}
self.alias_syms = {}
def parse_objdump(self):
objdump = ObjdumpParser(self.libfile)
objdump.run()
for p in objdump.dynamic_symbols:
vername = p['ver']
if vername.startswith('(') and vername.endswith(')'):
vername = vername[1:-1]
if not Config.version_filter.match(vername):
continue
if not Config.symbol_filter.match(p['symbol']):
continue
sym = Symbol(p['symbol'], p['offset'], vername, self)
if not self.versions.has_key(vername):
self.versions[vername] = VersionMap(vername)
self.versions[vername].append(sym)
if Config.alias_prefixes:
self.local_offsetmap = objdump.local_offsetmap
for p in objdump.local_symbols:
for prefix in Config.alias_prefixes:
if not p['symbol'].startswith(prefix):
continue
alias = SymbolAlias(p['symbol'], prefix, p['offset'])
if self.alias_syms.has_key(alias.name):
prevalias = self.alias_syms[alias.name]
if alias.name != prevalias.name or \
alias.offset != prevalias.offset:
warn(Config.w_alias, "Symbol alias is " \
"already defined: %s: %s at %08x -- %s at %08x" % \
(alias.alias, alias.name, alias.offset,
prevalias.name, prevalias.offset))
self.alias_syms[alias.name] = alias
def parse_dwarfdump(self):
dwarfdump = DwarfdumpParser(self.libfile)
def lookup(sym):
raw = None
try:
raw = dwarfdump.offsetmap[sym.offset]
except:
try:
localnames = self.local_offsetmap[sym.offset]
localnames.sort(key=lambda x: -len(x))
for localname in localnames:
if not self.alias_syms.has_key(localname):
continue
alias = self.alias_syms[localname]
raw = dwarfdump.offsetmap[alias.offset]
break
except:
pass
return raw
dwarfdump.run()
dwarf = Dwarf(dwarfdump)
for ver in self.versions.values():
for sym in ver.symbols.values():
raw = lookup(sym);
if not raw:
warn(Config.w_symbol, "Symbol %s (%s) not found at offset 0x%x" % \
(sym.name_ver, self.libfile, sym.offset))
continue
if Config.verbose >= 3:
print "Parsing symbol %s (%s)" % (sym.name_ver, self.libfile)
sym.definition = dwarf.build(raw)
def parse(self):
if not os.path.isfile(self.libfile):
print >> sys.stderr, ("No such file: %s" % self.libfile)
sys.exit(1)
self.parse_objdump()
self.parse_dwarfdump()
# {{{ parsers
class Parser(object):
def __init__(self, proc):
self.proc = proc
self.parser = self.parse_begin
def run(self):
fd = os.popen(self.proc, 'r')
while True:
line = fd.readline()
if (not line):
break
line = line.strip()
if (line):
self.parser(line)
err = fd.close()
if err:
print >> sys.stderr, ("Execution failed: %s" % self.proc)
sys.exit(2)
def parse_begin(self, line):
print(line)
class ObjdumpParser(Parser):
re_header = re.compile('(?P<table>\w*)\s*SYMBOL TABLE:')
re_local_symbol = re.compile('(?P<offset>[0-9a-fA-F]+)\s+(?P<bind>\w+)\s+(?P<type>\w+)\s+(?P<section>[^\s]+)\s+(?P<foffset>[0-9a-fA-F]+)\s*(?P<symbol>[^\s]*)')
re_lame_symbol = re.compile('(?P<offset>[0-9a-fA-F]+)\s+(?P<bind>\w+)\s+\*[A-Z]+\*')
re_dynamic_symbol = re.compile('(?P<offset>[0-9a-fA-F]+)\s+(?P<bind>\w+)\s+(?P<type>\w+)\s+(?P<section>[^\s]+)\s+(?P<foffset>[0-9a-fA-F]+)\s*(?P<ver>[^\s]*)\s*(?P<symbol>[^\s]*)')
def __init__(self, libfile):
Parser.__init__(self, "%s -wtT %s" % (Config.objdump, libfile))
self.dynamic_symbols = []
self.local_symbols = []
self.local_offsetmap = {}
def parse_begin(self, line):
self.parse_header(line)
def add_symbol(self, table, symbol, offsetmap = None):
offset = int(symbol['offset'], 16);
symbol['offset'] = offset
if (offset == 0):
return
table.append(symbol)
if offsetmap != None:
if not offsetmap.has_key(offset):
offsetmap[offset] = [symbol['symbol']]
else:
offsetmap[offset].append(symbol['symbol'])
def parse_header(self, line):
m = self.re_header.match(line)
if (m):
table = m.group('table')
if (table == "DYNAMIC"):
self.parser = self.parse_dynamic
elif table == '':
self.parser = self.parse_local
else:
raise ValueError("Invalid symbol table: %s" % table)
return True
return False
def parse_local(self, line):
if (self.parse_header(line)):
return
if (self.re_lame_symbol.match(line)):
return
m = self.re_local_symbol.match(line)
if (not m):
return
#raise ValueError("Invalid symbol definition: %s" % line)
p = m.groupdict()
if (p['symbol'] and p['symbol'].find('@') == -1):
self.add_symbol(self.local_symbols, p, self.local_offsetmap);
def parse_dynamic(self, line):
if (self.parse_header(line)):
return
if (self.re_lame_symbol.match(line)):
return
m = self.re_dynamic_symbol.match(line)
if (not m):
raise ValueError("Invalid symbol definition: %s" % line)
p = m.groupdict()
if (p['symbol'] and p['ver']):
self.add_symbol(self.dynamic_symbols, p);
class DwarfdumpParser(Parser):
tagcache_stats = Cache.CacheStats()
class Unit(object):
def __init__(self):
self.cache = Cache(enabled=Config.dwarfcache_enabled,
stats=DwarfdumpParser.tagcache_stats)
self.incomplete = Cache()
self.tags = {}
class Tag(object):
def __init__(self, unit, data):
self.unit = unit
self.id = int(data['id'])
self.level = int(data['level'])
self.tag = data['tag']
self.args = {}
self.nested = []
@property
def name(self):
return self.arg('name')
@property
def optname(self):
return self.optarg('name', None)
def setname(self, name):
self.args['DW_AT_name'] = name
def arg(self, a):
name = 'DW_AT_' + a
try:
return self.args[name]
except KeyError:
raise KeyError("Argument '%s' not found in %s: %s" %
(name, self, self.args))
def optarg(self, a, default):
try:
return self.arg(a)
except KeyError:
return default
def __repr__(self):
return "Tag(%d, %d, %s)" % (self.level, self.id, self.tag)
re_header = re.compile('<(?P<level>\d+)><(?P<id>\d+\+*\d*)><(?P<tag>\w+)>')
re_argname = re.compile('(?P<arg>\w+)<')
re_argunknown = re.compile('<Unknown AT value \w+><[^<>]+>')
skip_tags = set([
'DW_TAG_lexical_block',
'DW_TAG_inlined_subroutine',
'DW_TAG_label',
'DW_TAG_variable',
])
def __init__(self, libfile):
Parser.__init__(self, "%s -di %s" % (Config.dwarfdump, libfile))
self.current_unit = None
self.offsetmap = {}
self.stack = []
def parse_begin(self, line):
if line == '.debug_info':
self.parser = self.parse_debuginfo
else:
raise ValueError("Invalid dwarfdump header: %s" % line)
def parse_argvalue(self, args):
assert args.startswith('<')
i = 1
cnt = 1
while i < len(args) and args[i]:
if args[i] == '<':
cnt += 1
elif args[i] == '>':
cnt -= 1
if cnt == 0:
break
i = i + 1
value = args[1:i]
args = args[i+1:]
return (args, value)
def parse_arg(self, tag, args):
m = self.re_argname.match(args)
if not m:
m = self.re_argunknown.match(args)
if not m:
raise ValueError("Invalid dwarfdump: couldn't parse arguments: %s" %
args)
args = args[len(m.group(0)):].lstrip()
return args
argname = m.group('arg')
args = args[len(argname):]
value = []
while len(args) > 0 and args.startswith('<'):
(args, v) = self.parse_argvalue(args)
value.append(v)
args = args.lstrip()
if len(value) == 1:
value = value[0]
tag.args[argname] = value
return args
def parse_debuginfo(self, line):
m = self.re_header.match(line)
if not m:
raise ValueError("Invalid dwarfdump: %s" % line)
if m.group('level') == '0':
self.current_unit = DwarfdumpParser.Unit()
return
tag = DwarfdumpParser.Tag(self.current_unit, m.groupdict())
args = line[len(m.group(0)):].lstrip()
while args:
args = self.parse_arg(tag, args)
tag.unit.tags[tag.id] = tag
if tag.args.has_key('DW_AT_low_pc') and \
tag.tag not in DwarfdumpParser.skip_tags:
offset = int(tag.args['DW_AT_low_pc'], 16)
if self.offsetmap.has_key(offset):
raise ValueError("Dwarf dump parse error: " +
"symbol is aleady defined at offset 0x%x" % offset)
self.offsetmap[offset] = tag
if len(self.stack) > 0:
prev = self.stack.pop()
while prev.level >= tag.level and len(self.stack) > 0:
prev = self.stack.pop()
if prev.level < tag.level:
assert prev.level == tag.level - 1
# TODO check DW_AT_sibling ???
if tag.tag not in DwarfdumpParser.skip_tags:
prev.nested.append(tag)
self.stack.append(prev)
self.stack.append(tag)
assert len(self.stack) == tag.level
# }}}
def list_str(l):
l = [ str(x) for x in l ]
l.sort()
return ', '.join(l)
def names_ver_str(vername, names):
return list_str([ x + "@" + vername for x in names ])
def common_symbols(origlib, newlib):
result = []
verdiff = ListDiff(origlib.versions.keys(), newlib.versions.keys())
if Config.verbose >= 1:
print 'Original versions: ', list_str(verdiff.orig)
print 'New versions: ', list_str(verdiff.new)
for vername in verdiff.added:
print 'Added version: ', vername
print ' Added symbols: ', \
names_ver_str(vername, newlib.versions[vername].names())
for vername in verdiff.removed:
print 'Removed version: ', vername
print ' Removed symbols: ', \
names_ver_str(vername, origlib.versions[vername].names())
added = []
removed = []
for vername in verdiff.common:
origver = origlib.versions[vername]
newver = newlib.versions[vername]
namediff = ListDiff(origver.names(), newver.names())
if namediff.added:
added.append(names_ver_str(vername, namediff.added))
if namediff.removed:
removed.append(names_ver_str(vername, namediff.removed))
commonver = VersionMap(vername)
result.append(commonver)
for n in namediff.common:
sym = CommonSymbol(origver.symbols[n], newver.symbols[n])
commonver.append(sym)
if added:
print 'Added symbols:'
for i in added:
print ' ', i
if removed:
print 'Removed symbols:'
for i in removed:
print ' ', i
return result
def cmp_symbols(commonver):
for ver in commonver:
names = ver.names();
names.sort()
for symname in names:
sym = ver.symbols[symname]
match = sym.origsym.definition == sym.newsym.definition
if not match:
App.result_code = 1
if Config.verbose >= 1 or not match:
print '%s: definitions %smatch' % \
(sym.origsym.name_ver, "" if match else "mis")
if Config.dump or (not match and not Config.no_dump):
for x in [(sym.origsym, Config.origfile),
(sym.newsym, Config.newfile)]:
xsym = x[0]
xout = x[1].out
if not xsym.definition:
print >> xout, '\n// Definition not found: %s %s' % \
(xsym.name_ver, xsym.lib.libfile)
continue
print >> xout, '\n// Definitions mismatch: %s %s' % \
(xsym.name_ver, xsym.lib.libfile)
pp = PrettyPrinter()
pp.run(xsym.definition)
for i in pp.nested():
print >> xout, i
print >> xout, pp.result()
def dump_symbols(commonver):
class SymbolDump(object):
def __init__(self, io_conf):
self.io_conf = io_conf
self.pp = PrettyPrinter()
self.res = []
def run(self, sym):
r = self.pp.run(sym.definition)
self.res.append('/* %s@%s */ %s' % (sym.name, sym.version, r))
def finish(self):
print >> self.io_conf.out, '\n// Symbol dump: version %s, library %s' % \
(ver.name, self.io_conf.filename)
for i in self.pp.nested():
print >> self.io_conf.out, i
print >> self.io_conf.out, ''
for i in self.res:
print >> self.io_conf.out, i
for ver in commonver:
names = sorted(ver.names());
d_orig = SymbolDump(Config.origfile)
d_new = SymbolDump(Config.newfile)
for symname in names:
sym = ver.symbols[symname]
if not sym.origsym.definition or not sym.newsym.definition:
# XXX
warn(Config.w_symbol, 'Missing symbol definition: %s@%s' % \
(symname, ver.name))
continue
d_orig.run(sym.origsym)
d_new.run(sym.newsym)
d_orig.finish()
d_new.finish()
if __name__ == '__main__':
Config.init()
parser = optparse.OptionParser(usage="usage: %prog origlib newlib",
version="%prog " + Config.version)
parser.add_option('-v', '--verbose', action='count',
help="verbose mode, may be specified several times")
parser.add_option('--alias-prefix', action='append',
help="name prefix to try for symbol alias lookup", metavar="STR")
parser.add_option('--dump', action='store_true',
help="dump symbol definitions")
parser.add_option('--no-dump', action='store_true',
help="disable dump for mismatched symbols")
parser.add_option('--out-orig', action='store',
help="result output file for original library", metavar="ORIGFILE")
parser.add_option('--out-new', action='store',
help="result output file for new library", metavar="NEWFILE")
parser.add_option('--exclude-ver', action='append', metavar="RE")
parser.add_option('--include-ver', action='append', metavar="RE")
parser.add_option('--exclude-sym', action='append', metavar="RE")
parser.add_option('--include-sym', action='append', metavar="RE")
for opt in ['alias', 'cached', 'symbol']:
parser.add_option("--w-" + opt,
action="store_true", dest="w_" + opt)
parser.add_option("--w-no-" + opt,
action="store_false", dest="w_" + opt)
(opts, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
sys.exit(-1)
if opts.out_orig:
Config.origfile.init(opts.out_orig)
if opts.out_new:
Config.newfile.init(opts.out_new)
if opts.no_dump:
Config.dump = False
Config.no_dump = True
if opts.dump:
Config.dump = True
Config.no_dump = False
Config.verbose = 1
if opts.verbose:
Config.verbose = opts.verbose
if opts.alias_prefix:
Config.alias_prefixes = opts.alias_prefix
Config.alias_prefixes.sort(key=lambda x: -len(x))
for (k, v) in ({ '_sym': Config.symbol_filter,
'_ver': Config.version_filter }).items():
for a in [ 'exclude', 'include' ]:
opt = getattr(opts, a + k)
if opt:
getattr(v, a).extend(opt)
Config.version_filter.compile()
Config.symbol_filter.compile()
for w in ['w_alias', 'w_cached', 'w_symbol']:
if hasattr(opts, w):
v = getattr(opts, w)
if v != None:
setattr(Config, w, v)
(Config.origfile.filename, Config.newfile.filename) = (args[0], args[1])
origlib = Shlib(Config.origfile.filename)
origlib.parse()
newlib = Shlib(Config.newfile.filename)
newlib.parse()
commonver = common_symbols(origlib, newlib)
if Config.dump:
dump_symbols(commonver)
cmp_symbols(commonver)
if Config.verbose >= 4:
print Dwarf.cmpcache.stats.show('Cmp')
print DwarfdumpParser.tagcache_stats.show('Dwarf tag')
sys.exit(App.result_code)
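# Invocation sketch (library paths are hypothetical):
#   shlib-compat.py --include-ver 'FBSD_1\.' libfoo.so.1.orig libfoo.so.1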
| dplbsd/soc2013 | head/tools/tools/shlib-compat/shlib-compat.py | Python | bsd-2-clause | 36,865 |
import locale
from urh.util.Logger import logger
class Formatter:
@staticmethod
def local_decimal_seperator():
return locale.localeconv()["decimal_point"]
@staticmethod
def science_time(time_in_seconds: float, decimals=2, append_seconds=True, remove_spaces=False) -> str:
if time_in_seconds < 1e-6:
suffix = "n"
value = time_in_seconds * 1e9
elif time_in_seconds < 1e-3:
suffix = "µ"
value = time_in_seconds * 1e6
elif time_in_seconds < 1:
suffix = "m"
value = time_in_seconds * 1e3
else:
suffix = ""
value = time_in_seconds
result = locale.format_string("%.{0}f ".format(decimals) + suffix, value)
if append_seconds:
result += "s"
if remove_spaces:
result = result.replace(" ", "")
return result
@staticmethod
def big_value_with_suffix(value: float, decimals=3, strip_zeros=True) -> str:
fmt_str = "%.{0:d}f".format(decimals)
suffix = ""
if abs(value) >= 1e9:
suffix = "G"
result = locale.format_string(fmt_str, value / 1e9)
elif abs(value) >= 1e6:
suffix = "M"
result = locale.format_string(fmt_str, value / 1e6)
elif abs(value) >= 1e3:
suffix = "K"
result = locale.format_string(fmt_str, value / 1e3)
else:
result = locale.format_string(fmt_str, value)
if strip_zeros:
result = result.rstrip("0").rstrip(Formatter.local_decimal_seperator())
return result + suffix
@staticmethod
def str2val(str_val, dtype, default=0):
try:
return dtype(str_val)
except (ValueError, TypeError):
logger.warning("The {0} is not a valid {1}, assuming {2}".format(str_val, str(dtype), str(default)))
return default
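if __name__ == "__main__":
    # Illustrative self-check (not part of the upstream module); the exact
    # output depends on the active locale's decimal separator.
    print(Formatter.science_time(0.00025))            # e.g. 250.00 µs
    print(Formatter.big_value_with_suffix(433.92e6))  # e.g. 433.92M
    print(Formatter.str2val("12", int, 0))            # 12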
| jopohl/urh | src/urh/util/Formatter.py | Python | gpl-3.0 | 1,943 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for Relay program analysis."""
import tvm._ffi
tvm._ffi._init_api("relay.analysis", __name__)
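# _init_api looks up the packed functions registered on the C++ side under
# the "relay.analysis." prefix and attaches them as attributes of this
# module (a sketch of the mechanics; see tvm._ffi for the details).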
| dmlc/tvm | python/tvm/relay/analysis/_ffi_api.py | Python | apache-2.0 | 892 |
#!/usr/bin/python3
import math
import os
import subprocess
import traceback
import dbus
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('CDesktopEnums', '3.0')
gi.require_version('CinnamonDesktop', '3.0')
from gi.repository import Gio, Gtk, GObject, Gdk, GLib, GdkPixbuf, CDesktopEnums, CinnamonDesktop, XApp
from ChooserButtonWidgets import *
from KeybindingWidgets import ButtonKeybinding
settings_objects = {}
CAN_BACKEND = ["Switch", "SpinButton", "Entry", "TextView", "FontButton", "Range", "ComboBox",
"ColorChooser", "FileChooser", "SoundFileChooser", "IconChooser", "TweenChooser",
"EffectChooser", "DateChooser", "Keybinding"]
class EditableEntry (Gtk.Stack):
__gsignals__ = {
'changed': (GObject.SignalFlags.RUN_FIRST, None,
(str,))
}
def __init__ (self):
super(EditableEntry, self).__init__()
self.set_transition_type(Gtk.StackTransitionType.CROSSFADE)
self.set_transition_duration(150)
self.label = Gtk.Label()
self.entry = Gtk.Entry()
self.button = Gtk.Button()
self.button.set_alignment(1.0, 0.5)
self.button.set_relief(Gtk.ReliefStyle.NONE)
self.add_named(self.button, "button");
self.add_named(self.entry, "entry");
self.set_visible_child_name("button")
self.editable = False
self.current_text = None
self.show_all()
self.button.connect("released", self._on_button_clicked)
self.button.connect("activate", self._on_button_clicked)
self.entry.connect("activate", self._on_entry_validated)
self.entry.connect("changed", self._on_entry_changed)
self.entry.connect("focus-out-event", self._on_focus_lost)
def set_text(self, text):
self.button.set_label(text)
self.entry.set_text(text)
self.current_text = text
def _on_focus_lost(self, widget, event):
self.button.set_label(self.current_text)
self.entry.set_text(self.current_text)
self.set_editable(False)
def _on_button_clicked(self, button):
self.set_editable(True)
self.entry.grab_focus()
def _on_entry_validated(self, entry):
self.set_editable(False)
self.emit("changed", entry.get_text())
self.current_text = entry.get_text()
def _on_entry_changed(self, entry):
self.button.set_label(entry.get_text())
def set_editable(self, editable):
if (editable):
self.set_visible_child_name("entry")
else:
self.set_visible_child_name("button")
self.editable = editable
def set_tooltip_text(self, tooltip):
self.button.set_tooltip_text(tooltip)
def get_editable(self):
return self.editable
def get_text(self):
return self.entry.get_text()
class SidePage(object):
def __init__(self, name, icon, keywords, content_box = None, size = None, is_c_mod = False, is_standalone = False, exec_name = None, module=None):
self.name = name
self.icon = icon
self.content_box = content_box
self.widgets = []
self.is_c_mod = is_c_mod
self.is_standalone = is_standalone
self.exec_name = exec_name
self.module = module # Optionally set by the module so we can call on_module_selected() on it when we show it.
self.keywords = keywords
self.size = size
self.topWindow = None
self.builder = None
self.stack = None
if self.module != None:
self.module.loaded = False
def add_widget(self, widget):
self.widgets.append(widget)
def build(self):
# Clear all the widgets from the content box
widgets = self.content_box.get_children()
for widget in widgets:
self.content_box.remove(widget)
if (self.module is not None):
self.module.on_module_selected()
self.module.loaded = True
if self.is_standalone:
subprocess.Popen(self.exec_name.split())
return
# Add our own widgets
for widget in self.widgets:
if hasattr(widget, 'expand'):
self.content_box.pack_start(widget, True, True, 2)
else:
self.content_box.pack_start(widget, False, False, 2)
# C modules are sort of messy - they check the desktop type
# (for Unity or GNOME) and show/hide UI items depending on
# the result - so we cannot just show_all on the widget, it will
# mess up these modifications - so for these, we just show the
# top-level widget
if not self.is_c_mod:
self.content_box.show_all()
try:
self.check_third_arg()
except:
pass
return
self.content_box.show()
for child in self.content_box:
child.show()
# C modules can have non-C parts. C parts are all named c_box
if child.get_name() != "c_box":
pass
c_widgets = child.get_children()
if not c_widgets:
c_widget = self.content_box.c_manager.get_c_widget(self.exec_name)
if c_widget is not None:
child.pack_start(c_widget, False, False, 2)
c_widget.show()
else:
for c_widget in c_widgets:
c_widget.show()
def recursively_iterate(parent):
if self.stack:
return
for child in parent:
if isinstance(child, Gtk.Stack):
self.stack = child
break
elif isinstance(child, Gtk.Container):
recursively_iterate(child)
# Look for a stack recursively
recursively_iterate(child)
class CCModule:
def __init__(self, label, mod_id, icon, category, keywords, content_box):
sidePage = SidePage(label, icon, keywords, content_box, size=-1, is_c_mod=True, is_standalone=False, exec_name=mod_id, module=None)
self.sidePage = sidePage
self.name = mod_id
self.category = category
def process (self, c_manager):
if c_manager.lookup_c_module(self.name):
c_box = Gtk.Box.new(Gtk.Orientation.VERTICAL, 2)
c_box.set_vexpand(False)
c_box.set_name("c_box")
self.sidePage.add_widget(c_box)
return True
else:
return False
class SAModule:
def __init__(self, label, mod_id, icon, category, keywords, content_box):
        sidePage = SidePage(label, icon, keywords, content_box, size=False, is_c_mod=False, is_standalone=True, exec_name=mod_id)
self.sidePage = sidePage
self.name = mod_id
self.category = category
    def process(self):
name = self.name.replace("pkexec ", "")
name = name.split()[0]
return GLib.find_program_in_path(name) is not None
def walk_directories(dirs, filter_func, return_directories=False):
# If return_directories is False: returns a list of valid subdir names
# Else: returns a list of valid tuples (subdir-names, parent-directory)
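    # Illustrative example (paths hypothetical):
    #   walk_directories(["/usr/share/sounds"], os.path.isdir)
    #       -> ["alsa", "freedesktop", ...]
    #   walk_directories(["/usr/share/sounds"], os.path.isdir, return_directories=True)
    #       -> [["alsa", "/usr/share/sounds"], ...]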
valid = []
try:
for thdir in dirs:
if os.path.isdir(thdir):
for t in os.listdir(thdir):
if filter_func(os.path.join(thdir, t)):
if return_directories:
valid.append([t, thdir])
else:
valid.append(t)
    except Exception:
        # logging.critical("Error parsing directories", exc_info=True)
        pass
return valid
class Section(Gtk.Box):
def __init__(self, name):
self.name = name
super(Section, self).__init__()
self.set_orientation(Gtk.Orientation.VERTICAL)
self.set_border_width(6)
self.set_spacing(6)
self.label = Gtk.Label()
self.label.set_markup("<b>%s</b>" % self.name)
hbox = Gtk.Box()
hbox.set_orientation(Gtk.Orientation.HORIZONTAL)
hbox.pack_start(self.label, False, False, 0)
self.pack_start(hbox, False, True, 0)
def add(self, widget):
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
box.set_margin_left(40)
box.set_margin_right(40)
box.pack_start(widget, False, True, 0)
self.pack_start(box, False, False, 0)
def add_expand(self, widget):
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
box.set_margin_left(40)
box.set_margin_right(40)
box.pack_start(widget, True, True, 0)
self.pack_start(box, False, False, 0)
def add_indented(self, widget):
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
box.set_margin_left(80)
box.set_margin_right(10)
box.pack_start(widget, False, True, 0)
self.pack_start(box, False, False, 0)
def add_indented_expand(self, widget):
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
box.set_margin_left(80)
box.set_margin_right(10)
box.pack_start(widget, True, True, 0)
self.pack_start(box, False, False, 0)
class SectionBg(Gtk.Viewport):
def __init__(self):
Gtk.Viewport.__init__(self)
self.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
style = self.get_style_context()
style.add_class("section-bg")
self.expand = True # Tells CS to give expand us to the whole window
class SettingsStack(Gtk.Stack):
def __init__(self):
Gtk.Stack.__init__(self)
self.set_transition_type(Gtk.StackTransitionType.SLIDE_LEFT_RIGHT)
self.set_transition_duration(150)
self.expand = True
class SettingsRevealer(Gtk.Revealer):
def __init__(self, schema=None, key=None, values=None, check_func=None):
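        # Reveals its child according to a GSettings key. Illustrative uses
        # (schema/key names hypothetical):
        #   SettingsRevealer("org.example", "enabled")             # plain boolean bind
        #   SettingsRevealer("org.example", "mode", ["advanced"])  # reveal when value in list
        #   SettingsRevealer("org.example", "count", check_func=lambda v, _: v > 3)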
Gtk.Revealer.__init__(self)
self.check_func = check_func
self.box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=15)
Gtk.Revealer.add(self, self.box)
self.set_transition_type(Gtk.RevealerTransitionType.SLIDE_DOWN)
self.set_transition_duration(150)
if schema:
self.settings = Gio.Settings.new(schema)
# if there aren't values or a function provided to determine visibility we can do a simple bind
if values is None and check_func is None:
self.settings.bind(key, self, "reveal-child", Gio.SettingsBindFlags.GET)
else:
self.values = values
self.settings.connect("changed::" + key, self.on_settings_changed)
self.on_settings_changed(self.settings, key)
def add(self, widget):
self.box.pack_start(widget, False, True, 0)
#only used when checking values
def on_settings_changed(self, settings, key):
value = settings.get_value(key).unpack()
if self.check_func is None:
self.set_reveal_child(value in self.values)
else:
self.set_reveal_child(self.check_func(value, self.values))
class SettingsPage(Gtk.Box):
def __init__(self):
Gtk.Box.__init__(self)
self.set_orientation(Gtk.Orientation.VERTICAL)
self.set_spacing(15)
self.set_margin_left(80)
self.set_margin_right(80)
self.set_margin_top(15)
self.set_margin_bottom(15)
def add_section(self, title):
section = SettingsBox(title)
self.pack_start(section, False, False, 0)
return section
def add_reveal_section(self, title, schema=None, key=None, values=None, revealer=None):
section = SettingsBox(title)
if revealer is None:
revealer = SettingsRevealer(schema, key, values)
revealer.add(section)
section._revealer = revealer
self.pack_start(revealer, False, False, 0)
return section
class SettingsBox(Gtk.Frame):
def __init__(self, title):
Gtk.Frame.__init__(self)
self.set_shadow_type(Gtk.ShadowType.IN)
frame_style = self.get_style_context()
frame_style.add_class("view")
self.size_group = Gtk.SizeGroup()
self.size_group.set_mode(Gtk.SizeGroupMode.VERTICAL)
self.box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.add(self.box)
toolbar = Gtk.Toolbar.new()
toolbar_context = toolbar.get_style_context()
        toolbar_context.add_class("cs-header")
label = Gtk.Label()
label.set_markup("<b>%s</b>" % title)
title_holder = Gtk.ToolItem()
title_holder.add(label)
toolbar.add(title_holder)
self.box.add(toolbar)
self.need_separator = False
def add_row(self, widget):
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
if self.need_separator:
vbox.add(Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL))
list_box = Gtk.ListBox()
list_box.set_selection_mode(Gtk.SelectionMode.NONE)
row = Gtk.ListBoxRow(can_focus=False)
row.add(widget)
if isinstance(widget, Switch):
list_box.connect("row-activated", widget.clicked)
list_box.add(row)
vbox.add(list_box)
self.box.add(vbox)
self.need_separator = True
def add_reveal_row(self, widget, schema=None, key=None, values=None, check_func=None, revealer=None):
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
if self.need_separator:
vbox.add(Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL))
list_box = Gtk.ListBox()
list_box.set_selection_mode(Gtk.SelectionMode.NONE)
row = Gtk.ListBoxRow(can_focus=False)
row.add(widget)
if isinstance(widget, Switch):
list_box.connect("row-activated", widget.clicked)
list_box.add(row)
vbox.add(list_box)
if revealer is None:
revealer = SettingsRevealer(schema, key, values, check_func)
widget.revealer = revealer
revealer.add(vbox)
self.box.add(revealer)
self.need_separator = True
return revealer
class SettingsWidget(Gtk.Box):
def __init__(self, dep_key=None):
Gtk.Box.__init__(self)
self.set_orientation(Gtk.Orientation.HORIZONTAL)
self.set_spacing(20)
self.set_border_width(5)
self.set_margin_left(20)
self.set_margin_right(20)
if dep_key:
self.set_dep_key(dep_key)
def set_dep_key(self, dep_key):
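        # dep_key has the form "org.schema.id/key-name" (names illustrative);
        # a leading "!" inverts the bound boolean, so "!org.schema.id/enabled"
        # keeps this widget sensitive only while the key is False.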
flag = Gio.SettingsBindFlags.GET
if dep_key[0] == "!":
dep_key = dep_key[1:]
            flag |= Gio.SettingsBindFlags.INVERT_BOOLEAN
split = dep_key.split("/")
dep_settings = Gio.Settings.new(split[0])
dep_settings.bind(split[1], self, "sensitive", flag)
def add_to_size_group(self, group):
group.add_widget(self.content_widget)
def fill_row(self):
self.set_border_width(0)
self.set_margin_left(0)
self.set_margin_right(0)
def get_settings(self, schema):
global settings_objects
try:
return settings_objects[schema]
        except KeyError:
settings_objects[schema] = Gio.Settings.new(schema)
return settings_objects[schema]
class SettingsLabel(Gtk.Label):
def __init__(self, text=None):
Gtk.Label.__init__(self)
if text:
self.set_label(text)
self.set_alignment(0.0, 0.5)
self.set_line_wrap(True)
def set_label_text(self, text):
self.set_label(text)
class IndentedHBox(Gtk.HBox):
def __init__(self):
super(IndentedHBox, self).__init__()
indent = Gtk.Label.new('\t')
self.pack_start(indent, False, False, 0)
def add(self, item):
self.pack_start(item, False, True, 0)
def add_expand(self, item):
self.pack_start(item, True, True, 0)
class Switch(SettingsWidget):
bind_prop = "active"
bind_dir = Gio.SettingsBindFlags.DEFAULT
def __init__(self, label, dep_key=None, tooltip=""):
super(Switch, self).__init__(dep_key=dep_key)
self.content_widget = Gtk.Switch(valign=Gtk.Align.CENTER)
self.label = SettingsLabel(label)
self.pack_start(self.label, False, False, 0)
self.pack_end(self.content_widget, False, False, 0)
self.set_tooltip_text(tooltip)
def clicked(self, *args):
if self.is_sensitive():
self.content_widget.set_active(not self.content_widget.get_active())
class SpinButton(SettingsWidget):
bind_prop = "value"
bind_dir = Gio.SettingsBindFlags.GET
def __init__(self, label, units="", mini=None, maxi=None, step=1, page=None, size_group=None, dep_key=None, tooltip=""):
super(SpinButton, self).__init__(dep_key=dep_key)
self.timer = None
if units:
label += " (%s)" % units
self.label = SettingsLabel(label)
self.content_widget = Gtk.SpinButton()
self.pack_start(self.label, False, False, 0)
self.pack_end(self.content_widget, False, False, 0)
        key_range = self.get_range()
        if mini is None or maxi is None:
            mini = key_range[0]
            maxi = key_range[1]
        elif key_range is not None:
            mini = max(mini, key_range[0])
            maxi = min(maxi, key_range[1])
if not page:
page = step
self.content_widget.set_range(mini, maxi)
self.content_widget.set_increments(step, page)
digits = 0
        if step and '.' in str(step):
digits = len(str(step).split('.')[1])
self.content_widget.set_digits(digits)
self.content_widget.connect("value-changed", self.apply_later)
self.set_tooltip_text(tooltip)
if size_group:
self.add_to_size_group(size_group)
def apply_later(self, *args):
def apply(self):
self.set_value(self.content_widget.get_value())
self.timer = None
if self.timer:
GLib.source_remove(self.timer)
self.timer = GLib.timeout_add(300, apply, self)
class Entry(SettingsWidget):
bind_prop = "text"
bind_dir = Gio.SettingsBindFlags.DEFAULT
def __init__(self, label, expand_width=False, size_group=None, dep_key=None, tooltip=""):
super(Entry, self).__init__(dep_key=dep_key)
self.label = SettingsLabel(label)
self.content_widget = Gtk.Entry()
self.content_widget.set_valign(Gtk.Align.CENTER)
self.pack_start(self.label, False, False, 0)
self.pack_end(self.content_widget, expand_width, expand_width, 0)
self.set_tooltip_text(tooltip)
if size_group:
self.add_to_size_group(size_group)
class TextView(SettingsWidget):
bind_prop = "text"
bind_dir = Gio.SettingsBindFlags.DEFAULT
def __init__(self, label, height=200, dep_key=None, tooltip=""):
super(TextView, self).__init__(dep_key=dep_key)
self.set_orientation(Gtk.Orientation.VERTICAL)
self.set_spacing(8)
self.label = Gtk.Label.new(label)
self.label.set_halign(Gtk.Align.CENTER)
self.scrolledwindow = Gtk.ScrolledWindow(hadjustment=None, vadjustment=None)
self.scrolledwindow.set_size_request(width=-1, height=height)
self.scrolledwindow.set_policy(hscrollbar_policy=Gtk.PolicyType.AUTOMATIC,
vscrollbar_policy=Gtk.PolicyType.AUTOMATIC)
self.scrolledwindow.set_shadow_type(type=Gtk.ShadowType.ETCHED_IN)
self.content_widget = Gtk.TextView()
self.content_widget.set_border_width(3)
self.content_widget.set_wrap_mode(wrap_mode=Gtk.WrapMode.NONE)
self.bind_object = self.content_widget.get_buffer()
self.pack_start(self.label, False, False, 0)
self.add(self.scrolledwindow)
self.scrolledwindow.add(self.content_widget)
self._value_changed_timer = None
class FontButton(SettingsWidget):
bind_prop = "font-name"
bind_dir = Gio.SettingsBindFlags.DEFAULT
def __init__(self, label, size_group=None, dep_key=None, tooltip=""):
super(FontButton, self).__init__(dep_key=dep_key)
self.label = SettingsLabel(label)
self.content_widget = Gtk.FontButton()
self.content_widget.set_valign(Gtk.Align.CENTER)
self.pack_start(self.label, False, False, 0)
self.pack_end(self.content_widget, False, False, 0)
self.set_tooltip_text(tooltip)
if size_group:
self.add_to_size_group(size_group)
class Range(SettingsWidget):
bind_prop = "value"
bind_dir = Gio.SettingsBindFlags.GET | Gio.SettingsBindFlags.NO_SENSITIVITY
def __init__(self, label, min_label="", max_label="", mini=None, maxi=None, step=None, invert=False, log=False, show_value=True, dep_key=None, tooltip="", flipped=False, units=""):
super(Range, self).__init__(dep_key=dep_key)
self.set_orientation(Gtk.Orientation.VERTICAL)
self.set_spacing(0)
self.log = log
self.invert = invert
self.flipped = flipped
self.timer = None
self.value = 0
hbox = Gtk.Box()
if units:
label += " ({})".format(units)
self.label = Gtk.Label.new(label)
self.label.set_halign(Gtk.Align.CENTER)
        self.min_label = Gtk.Label()
self.max_label = Gtk.Label()
self.min_label.set_alignment(1.0, 0.75)
self.max_label.set_alignment(1.0, 0.75)
self.min_label.set_margin_right(6)
self.max_label.set_margin_left(6)
self.min_label.set_markup("<i><small>%s</small></i>" % min_label)
self.max_label.set_markup("<i><small>%s</small></i>" % max_label)
        key_range = self.get_range()
        if mini is None or maxi is None:
            mini = key_range[0]
            maxi = key_range[1]
        elif key_range is not None:
            mini = max(mini, key_range[0])
            maxi = min(maxi, key_range[1])
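        # With log=True the slider spans (ln mini, ln maxi) and values are
        # mapped back through exp(); flipped=True negates the bounds so the
        # scale direction reverses while set_value()/get_value() still see
        # real values. E.g. mini=0.5, maxi=2.0 with log gives a slider range
        # of roughly (-0.693, 0.693).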
if log:
mini = math.log(mini)
maxi = math.log(maxi)
if self.flipped:
self.map_get = lambda x: -1 * (math.log(x))
self.map_set = lambda x: math.exp(x)
else:
self.map_get = lambda x: math.log(x)
self.map_set = lambda x: math.exp(x)
elif self.flipped:
self.map_get = lambda x: x * -1
self.map_set = lambda x: x * -1
if self.flipped:
tmp_mini = mini
mini = maxi * -1
maxi = tmp_mini * -1
if step is None:
self.step = (maxi - mini) * 0.02
else:
self.step = math.log(step) if log else step
self.content_widget = Gtk.Scale.new_with_range(Gtk.Orientation.HORIZONTAL, mini, maxi, self.step)
self.content_widget.set_inverted(invert)
self.content_widget.set_draw_value(show_value and not self.flipped)
self.bind_object = self.content_widget.get_adjustment()
if invert:
            self.step *= -1  # Gtk.Scale.new_with_range wants a positive value, but our custom scroll handler wants a negative one
hbox.pack_start(self.min_label, False, False, 0)
hbox.pack_start(self.content_widget, True, True, 0)
hbox.pack_start(self.max_label, False, False, 0)
self.pack_start(self.label, False, False, 0)
self.pack_start(hbox, True, True, 6)
self.content_widget.connect("scroll-event", self.on_scroll_event)
self.content_widget.connect("value-changed", self.apply_later)
self.set_tooltip_text(tooltip)
def apply_later(self, *args):
def apply(self):
if self.log:
self.set_value(math.exp(abs(self.content_widget.get_value())))
else:
if self.flipped:
self.set_value(self.content_widget.get_value() * -1)
else:
self.set_value(self.content_widget.get_value())
self.timer = None
if self.timer:
GLib.source_remove(self.timer)
self.timer = GLib.timeout_add(300, apply, self)
def on_scroll_event(self, widget, event):
found, delta_x, delta_y = event.get_scroll_deltas()
# If you scroll up, delta_y < 0. This is a weird world
widget.set_value(widget.get_value() - delta_y * self.step)
return True
def add_mark(self, value, position, markup):
if self.log:
self.content_widget.add_mark(math.log(value), position, markup)
else:
self.content_widget.add_mark(value, position, markup)
def set_rounding(self, digits):
if not self.log:
self.content_widget.set_round_digits(digits)
self.content_widget.set_digits(digits)
class ComboBox(SettingsWidget):
bind_dir = None
    def __init__(self, label, options=None, valtype=None, size_group=None, dep_key=None, tooltip=""):
super(ComboBox, self).__init__(dep_key=dep_key)
self.valtype = valtype
self.option_map = {}
self.label = SettingsLabel(label)
selected = None
self.content_widget = Gtk.ComboBox()
renderer_text = Gtk.CellRendererText()
self.content_widget.pack_start(renderer_text, True)
self.content_widget.add_attribute(renderer_text, "text", 1)
self.pack_start(self.label, False, False, 0)
self.pack_end(self.content_widget, False, False, 0)
self.content_widget.set_valign(Gtk.Align.CENTER)
        self.set_options(options or [])
self.set_tooltip_text(tooltip)
if size_group:
self.add_to_size_group(size_group)
def on_my_value_changed(self, widget):
tree_iter = widget.get_active_iter()
        if tree_iter is not None:
self.value = self.model[tree_iter][0]
self.set_value(self.value)
def on_setting_changed(self, *args):
self.value = self.get_value()
try:
self.content_widget.set_active_iter(self.option_map[self.value])
        except KeyError:
self.content_widget.set_active_iter(None)
def connect_widget_handlers(self, *args):
self.content_widget.connect('changed', self.on_my_value_changed)
def set_options(self, options):
if self.valtype is not None:
var_type = self.valtype
else:
# assume all keys are the same type (mixing types is going to cause an error somewhere)
var_type = type(options[0][0])
self.model = Gtk.ListStore(var_type, str)
for option in options:
self.option_map[option[0]] = self.model.append([option[0], option[1]])
self.content_widget.set_model(self.model)
self.content_widget.set_id_column(0)
class ColorChooser(SettingsWidget):
bind_dir = None
def __init__(self, label, legacy_string=False, size_group=None, dep_key=None, tooltip=""):
super(ColorChooser, self).__init__(dep_key=dep_key)
# note: Gdk.Color is deprecated in favor of Gdk.RGBA, but as the hex format is still used
# in some places (most notably the desktop background handling in cinnamon-desktop) we
# still support it for now by adding the legacy_string argument
self.legacy_string = legacy_string
self.label = SettingsLabel(label)
self.content_widget = Gtk.ColorButton()
self.content_widget.set_use_alpha(True)
self.pack_start(self.label, False, False, 0)
self.pack_end(self.content_widget, False, False, 0)
self.set_tooltip_text(tooltip)
if size_group:
self.add_to_size_group(size_group)
def on_setting_changed(self, *args):
color_string = self.get_value()
rgba = Gdk.RGBA()
rgba.parse(color_string)
self.content_widget.set_rgba(rgba)
def connect_widget_handlers(self, *args):
self.content_widget.connect('color-set', self.on_my_value_changed)
def on_my_value_changed(self, widget):
if self.legacy_string:
color_string = self.content_widget.get_color().to_string()
else:
color_string = self.content_widget.get_rgba().to_string()
self.set_value(color_string)
class FileChooser(SettingsWidget):
bind_dir = None
def __init__(self, label, dir_select=False, size_group=None, dep_key=None, tooltip=""):
super(FileChooser, self).__init__(dep_key=dep_key)
if dir_select:
action = Gtk.FileChooserAction.SELECT_FOLDER
else:
action = Gtk.FileChooserAction.OPEN
self.label = SettingsLabel(label)
self.content_widget = Gtk.FileChooserButton(action=action)
self.pack_start(self.label, False, False, 0)
self.pack_end(self.content_widget, False, False, 0)
self.set_tooltip_text(tooltip)
if size_group:
self.add_to_size_group(size_group)
def on_file_selected(self, *args):
self.set_value(self.content_widget.get_uri())
def on_setting_changed(self, *args):
self.content_widget.set_uri(self.get_value())
def connect_widget_handlers(self, *args):
self.content_widget.connect("file-set", self.on_file_selected)
class SoundFileChooser(SettingsWidget):
bind_dir = None
def __init__(self, label, event_sounds=True, size_group=None, dep_key=None, tooltip=""):
super(SoundFileChooser, self).__init__(dep_key=dep_key)
self.event_sounds = event_sounds
self.label = SettingsLabel(label)
self.content_widget = Gtk.Box()
c = self.content_widget.get_style_context()
c.add_class(Gtk.STYLE_CLASS_LINKED)
self.file_picker_button = Gtk.Button()
self.file_picker_button.connect("clicked", self.on_picker_clicked)
button_content = Gtk.Box(spacing=5)
self.file_picker_button.add(button_content)
self.button_label = Gtk.Label()
button_content.pack_start(Gtk.Image(icon_name="sound"), False, False, 0)
button_content.pack_start(self.button_label, False, False, 0)
self.content_widget.pack_start(self.file_picker_button, True, True, 0)
self.pack_start(self.label, False, False, 0)
self.pack_end(self.content_widget, False, False, 0)
self.play_button = Gtk.Button()
self.play_button.set_image(Gtk.Image.new_from_icon_name("media-playback-start-symbolic", Gtk.IconSize.BUTTON))
self.play_button.connect("clicked", self.on_play_clicked)
self.content_widget.pack_start(self.play_button, False, False, 0)
self._proxy = None
try:
Gio.DBusProxy.new_for_bus(Gio.BusType.SESSION, Gio.DBusProxyFlags.NONE, None,
'org.cinnamon.SettingsDaemon.Sound',
'/org/cinnamon/SettingsDaemon/Sound',
'org.cinnamon.SettingsDaemon.Sound',
None, self._on_proxy_ready, None)
        except GLib.Error as e:
print(e)
self._proxy = None
self.play_button.set_sensitive(False)
self.set_tooltip_text(tooltip)
if size_group:
self.add_to_size_group(size_group)
    def _on_proxy_ready(self, source_object, result, data=None):
self._proxy = Gio.DBusProxy.new_for_bus_finish(result)
def on_play_clicked(self, widget):
self._proxy.PlaySoundFile("(us)", 0, self.get_value())
def on_picker_clicked(self, widget):
dialog = Gtk.FileChooserDialog(title=self.label.get_text(),
action=Gtk.FileChooserAction.OPEN,
transient_for=self.get_toplevel(),
buttons=(_("_Cancel"), Gtk.ResponseType.CANCEL,
_("_Open"), Gtk.ResponseType.ACCEPT))
if os.path.exists(self.get_value()):
dialog.set_filename(self.get_value())
else:
dialog.set_current_folder('/usr/share/sounds')
sound_filter = Gtk.FileFilter()
if self.event_sounds:
sound_filter.add_mime_type("audio/x-wav")
sound_filter.add_mime_type("audio/x-vorbis+ogg")
else:
sound_filter.add_mime_type("audio/*")
sound_filter.set_name(_("Sound files"))
dialog.add_filter(sound_filter)
if (dialog.run() == Gtk.ResponseType.ACCEPT):
name = dialog.get_filename()
self.set_value(name)
self.update_button_label(name)
dialog.destroy()
def update_button_label(self, absolute_path):
if absolute_path != "":
f = Gio.File.new_for_path(absolute_path)
self.button_label.set_label(f.get_basename())
def on_setting_changed(self, *args):
self.update_button_label(self.get_value())
def connect_widget_handlers(self, *args):
pass
class IconChooser(SettingsWidget):
bind_prop = "icon"
bind_dir = Gio.SettingsBindFlags.DEFAULT
def __init__(self, label, expand_width=False, size_group=None, dep_key=None, tooltip=""):
super(IconChooser, self).__init__(dep_key=dep_key)
self.label = SettingsLabel(label)
self.content_widget = XApp.IconChooserButton()
self.content_widget.set_icon_size(Gtk.IconSize.BUTTON)
self.pack_start(self.label, False, False, 0)
self.pack_end(self.content_widget, expand_width, expand_width, 0)
self.set_tooltip_text(tooltip)
if size_group:
self.add_to_size_group(size_group)
class TweenChooser(SettingsWidget):
bind_prop = "tween"
bind_dir = Gio.SettingsBindFlags.DEFAULT
def __init__(self, label, size_group=None, dep_key=None, tooltip=""):
super(TweenChooser, self).__init__(dep_key=dep_key)
self.label = SettingsLabel(label)
self.content_widget = TweenChooserButton()
self.pack_start(self.label, False, False, 0)
self.pack_end(self.content_widget, False, False, 0)
self.set_tooltip_text(tooltip)
if size_group:
self.add_to_size_group(size_group)
class EffectChooser(SettingsWidget):
bind_prop = "effect"
bind_dir = Gio.SettingsBindFlags.DEFAULT
def __init__(self, label, possible=None, size_group=None, dep_key=None, tooltip=""):
super(EffectChooser, self).__init__(dep_key=dep_key)
self.label = SettingsLabel(label)
self.content_widget = EffectChooserButton(possible)
self.pack_start(self.label, False, False, 0)
self.pack_end(self.content_widget, False, False, 0)
self.set_tooltip_text(tooltip)
if size_group:
self.add_to_size_group(size_group)
class DateChooser(SettingsWidget):
bind_dir = None
def __init__(self, label, size_group=None, dep_key=None, tooltip=""):
super(DateChooser, self).__init__(dep_key=dep_key)
self.label = SettingsLabel(label)
self.content_widget = DateChooserButton()
self.pack_start(self.label, False, False, 0)
self.pack_end(self.content_widget, False, False, 0)
self.set_tooltip_text(tooltip)
if size_group:
self.add_to_size_group(size_group)
def on_date_changed(self, *args):
date = self.content_widget.get_date()
self.set_value({"y": date[0], "m": date[1], "d": date[2]})
def on_setting_changed(self, *args):
date = self.get_value()
self.content_widget.set_date(date["y"], date["m"], date["d"])
def connect_widget_handlers(self, *args):
self.content_widget.connect("date-changed", self.on_date_changed)
class Keybinding(SettingsWidget):
bind_dir = None
def __init__(self, label, num_bind=2, size_group=None, dep_key=None, tooltip=""):
super(Keybinding, self).__init__(dep_key=dep_key)
self.num_bind = num_bind
self.label = SettingsLabel(label)
self.buttons = []
self.teach_button = None
self.content_widget = Gtk.Frame(shadow_type=Gtk.ShadowType.IN)
self.content_widget.set_valign(Gtk.Align.CENTER)
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
self.content_widget.add(box)
self.pack_start(self.label, False, False, 0)
self.pack_end(self.content_widget, False, False, 0)
for x in range(self.num_bind):
if x != 0:
box.add(Gtk.Separator(orientation=Gtk.Orientation.VERTICAL))
kb = ButtonKeybinding()
kb.set_size_request(150, -1)
kb.connect("accel-edited", self.on_kb_changed)
kb.connect("accel-cleared", self.on_kb_changed)
box.pack_start(kb, False, False, 0)
self.buttons.append(kb)
self.event_id = None
self.teaching = False
self.set_tooltip_text(tooltip)
if size_group:
self.add_to_size_group(size_group)
def on_kb_changed(self, *args):
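        # The setting is stored as one string with the accelerators joined
        # by "::", e.g. "<Super>e::<Primary><Alt>t" for num_bind == 2
        # (bindings shown are illustrative).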
bindings = []
for x in range(self.num_bind):
string = self.buttons[x].get_accel_string()
bindings.append(string)
self.set_value("::".join(bindings))
def on_setting_changed(self, *args):
value = self.get_value()
bindings = value.split("::")
for x in range(min(len(bindings), self.num_bind)):
self.buttons[x].set_accel_string(bindings[x])
def connect_widget_handlers(self, *args):
pass
class Button(SettingsWidget):
def __init__(self, label, callback=None):
super(Button, self).__init__()
self.label = label
self.callback = callback
self.content_widget = Gtk.Button(label=label)
self.pack_start(self.content_widget, True, True, 0)
self.content_widget.connect("clicked", self._on_button_clicked)
def _on_button_clicked(self, *args):
if self.callback is not None:
self.callback(self)
elif hasattr(self, "on_activated"):
self.on_activated()
else:
print("warning: button '%s' does nothing" % self.label)
def set_label(self, label):
self.label = label
self.content_widget.set_label(label)
class Text(SettingsWidget):
def __init__(self, label, align=Gtk.Align.START):
super(Text, self).__init__()
self.label = label
if align == Gtk.Align.END:
xalign = 1.0
justification = Gtk.Justification.RIGHT
elif align == Gtk.Align.CENTER:
xalign = 0.5
justification = Gtk.Justification.CENTER
else: # START and FILL align left
xalign = 0
justification = Gtk.Justification.LEFT
        self.content_widget = Gtk.Label(label=label, halign=align, xalign=xalign, justify=justification)
self.content_widget.set_line_wrap(True)
self.pack_start(self.content_widget, True, True, 0)
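
if __name__ == "__main__":
    # Illustrative sketch only (not part of the original module): shows how
    # the widgets above compose. A SettingsPage holds titled SettingsBox
    # sections, and each row is a SettingsWidget subclass. Assumes a display
    # is available; the window contents and labels are hypothetical.
    win = Gtk.Window(title="SettingsWidgets demo")
    page = SettingsPage()
    section = page.add_section("General")
    section.add_row(Switch("Enable feature", tooltip="Toggle the feature"))
    win.add(page)
    win.connect("destroy", Gtk.main_quit)
    win.show_all()
    Gtk.main()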
| linuxmint/Cinnamon | files/usr/share/cinnamon/cinnamon-settings/bin/SettingsWidgets.py | Python | gpl-2.0 | 39,017 |
from base import Task
from common import phases
import filesystem
import volume
class PartitionVolume(Task):
description = 'Partitioning the volume'
phase = phases.volume_preparation
@classmethod
def run(cls, info):
info.volume.partition_map.create(info.volume)
class MapPartitions(Task):
description = 'Mapping volume partitions'
phase = phases.volume_preparation
predecessors = [PartitionVolume]
successors = [filesystem.Format]
@classmethod
def run(cls, info):
info.volume.partition_map.map(info.volume)
class UnmapPartitions(Task):
description = 'Removing volume partitions mapping'
phase = phases.volume_unmounting
predecessors = [filesystem.UnmountRoot]
successors = [volume.Detach]
@classmethod
def run(cls, info):
info.volume.partition_map.unmap(info.volume)
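
# Illustrative sketch (not part of the original file): a new task slots into
# the same phase DAG by declaring predecessors/successors. This hypothetical
# task would run after partitions are mapped and before they are formatted.
class CheckPartitionMap(Task):
    description = 'Checking the partition map'
    phase = phases.volume_preparation
    predecessors = [MapPartitions]
    successors = [filesystem.Format]

    @classmethod
    def run(cls, info):
        # A real implementation would inspect info.volume.partition_map here.
        pass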
| brianspeir/Vanilla | vendor/bootstrap-vz/common/tasks/partitioning.py | Python | bsd-3-clause | 798 |
import os
import re
import yaml
import types
from keystoneclient.auth.identity import v2
from keystoneclient import session
from keystoneclient.v2_0.client import Client as keyclient
class OpenStack(object):
def __init__(self, conf):
config_file = os.path.join(os.path.dirname(__file__), conf)
        try:
            with open(config_file) as fh:
                config = yaml.safe_load(fh)
        except Exception as e:
            raise RuntimeError("Error reading config file %s: %s" % (conf, e))
self.username = config['username']
self.password = config['password']
self.tenant = config['tenant']
self.auth_url = config['auth_url']
self.endpoints = config['endpoints']
self.act_name = ""
def getSession(self):
self.auth = v2.Password(auth_url=self.auth_url,
username=self.username,
password=self.password,
tenant_name=self.tenant)
return session.Session(auth=self.auth)
def getToken(self):
        sess = self.getSession()
        return sess.get_token()
def getEndpoint(self, service):
token = self.getToken()
client = keyclient(auth_url=self.auth_url, token=token)
print(client.services.list())
def parseAction(self, instance, parts):
args, parts = self.parseArgs(parts)
foo = None
try:
self.act_name = parts[0]
foo = getattr(instance, self.act_name)
except AttributeError:
print("That look like an incorrect tiddly bit.")
else:
parts.pop(0)
for p in parts:
try:
foo = getattr(foo, p)
except AttributeError:
print("That tiddly bit be wrong")
return foo, args
def parseOutput(self, output):
results = {self.act_name: []}
if hasattr(
output,
'__getitem__') or isinstance(
output,
types.GeneratorType):
for result in output:
if hasattr(result, 'to_dict'):
result = result.to_dict()
results[self.act_name].append(result)
else:
if hasattr(output, 'to_dict'):
results[self.act_name] = output.to_dict()
else:
results[self.act_name] = output
return results
def run(self, instance, parts):
action, args = self.parseAction(instance, parts)
return self.parseOutput(action(**args))
def parseArgs(self, arg):
arg.pop(0)
args = {}
parts = []
for a in arg:
if re.search('=', a):
argsplit = a.split('=')
args[argsplit[0]] = argsplit[1]
else:
parts.append(a)
return args, parts
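
if __name__ == '__main__':
    # Illustrative only (not part of the original file): parseArgs drops the
    # leading token, collects key=value pairs into a dict and keeps the rest
    # as attribute path parts. Assumes a valid conf.yaml beside this file.
    client = OpenStack('conf.yaml')
    print(client.parseArgs(['servers', 'list', 'name=web01']))
    # -> ({'name': 'web01'}, ['list'])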
| armab/st2contrib | packs/openstack/actions/lib/openstack.py | Python | apache-2.0 | 2,933 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields
class ProductCategory(models.Model):
_inherit = 'product.category'
procured_purchase_grouping = fields.Selection(
[('standard', 'Standard grouping'),
('line', 'No line grouping'),
('order', 'No order grouping')],
string='Procured purchase grouping', default='standard',
help="Select the behaviour for grouping procured purchases for the "
"the products of this category:\n"
"* Standard grouping (default): Procurements will generate "
"purchase orders as always, grouping lines and orders when "
"possible.\n"
"* No line grouping: If there are any open purchase order for "
"the same supplier, it will be reused, but lines won't be "
"merged.\n"
"* No order grouping: This option will prevent any kind of "
"grouping.")
| Eficent/odoomrp-wip | procurement_purchase_no_grouping/models/product_category.py | Python | agpl-3.0 | 1,186 |
from mock import MagicMock, patch
from nose.tools import eq_, raises
from orchestrator.tasks.common import ErrorHandlerTask, ping, async_wait
from orchestrator.tasks.util import TaskNotReadyException
@patch('orchestrator.tasks.common.group')
def test_on_failure_for_error_handler_task(m_group):
"""
Should invoke error tasks as expected
"""
# Given: Keyword args with error tasks
kwargs = {
'error_tasks': MagicMock()
}
# And: Mock Error
exc = MagicMock(spec=BaseException)
# When: I invoke on_failure with given keyword args
ErrorHandlerTask().on_failure(exc, 'MockTaskId', [],
kwargs, None)
# Then: Error tasks are invoked as expected
m_group.assert_called_once_with(kwargs['error_tasks'])
m_group.return_value.delay.assert_called_once_with(exc)
@patch('orchestrator.tasks.common.group')
def test_on_failure_for_error_handler_task_with_no_error_tasks(m_group):
"""
Should not invoke any tasks
"""
# Given: Keyword args with no error tasks
kwargs = {
'error_tasks': []
}
# And: Mock Error
exc = MagicMock(spec=BaseException)
# When: I invoke on_failure with given keyword args
ErrorHandlerTask().on_failure(exc, 'MockTaskId', [],
kwargs, None)
# Then: Error tasks are invoked as expected
eq_(m_group.call_count, 0)
def test_ping():
"""
Should return pong
"""
# When: I invoke ping task
ret_value = ping()
# Then: 'pong' is returned
eq_(ret_value, 'pong')
@patch('orchestrator.tasks.common.simple_result')
def test_async_wait(m_simple_result):
# Given: Mock result
result = {'mockkey': 'mockvalue'}
m_simple_result.return_value = result
# When: I perform async wait on mock result
ret_value = async_wait(result)
# Then: Expected return value is returned
eq_(ret_value, result)
m_simple_result.assert_called_once_with(result)
@patch('orchestrator.tasks.common.simple_result')
def test_async_wait_with_return_value_specified(m_simple_result):
# Given: Mock result
result = {'mockkey': 'mockvalue'}
m_simple_result.return_value = result
# When: I perform async wait on mock result
ret_value = async_wait(result, ret_value='Mock')
# Then: Expected return value is returned
eq_(ret_value, 'Mock')
m_simple_result.assert_called_once_with(result)
@patch('orchestrator.tasks.common.simple_result')
@raises(TaskNotReadyException)
def test_async_wait_when_task_is_not_ready(m_simple_result):
# Given: Result is not ready
m_simple_result.side_effect = TaskNotReadyException()
result = MagicMock()
# When: I perform async wait on mock result
async_wait(result)
# Then: TaskNotReadyException is re-raised
| totem/cluster-orchestrator | tests/unit/orchestrator/tasks/test_common.py | Python | mit | 2,816 |
# -*- coding: utf-8 -*-
###############################################################################
#
# ListMembers
# Retrieves the email addresses of members of a MailChimp list.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListMembers(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListMembers Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListMembers, self).__init__(temboo_session, '/Library/MailChimp/ListMembers')
def new_input_set(self):
return ListMembersInputSet()
def _make_result_set(self, result, path):
return ListMembersResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListMembersChoreographyExecution(session, exec_id, path)
class ListMembersInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListMembers
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Mailchimp.)
"""
super(ListMembersInputSet, self)._set_input('APIKey', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, integer) Specifies the number of records in a page to be returned. Must be greater than zero and less than or equal to 15000. Defaults to 100.)
"""
super(ListMembersInputSet, self)._set_input('Limit', value)
def set_ListId(self, value):
"""
Set the value of the ListId input for this Choreo. ((required, string) The id of the Mailchimp list to retrieve members from.)
"""
super(ListMembersInputSet, self)._set_input('ListId', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) Indicates the desired format for the response. Accepted values are "json" or "xml" (the default).)
"""
super(ListMembersInputSet, self)._set_input('ResponseFormat', value)
def set_Since(self, value):
"""
Set the value of the Since input for this Choreo. ((optional, date) Retrieves records that have changed since this date/time. Formatted like 'YYYY-MM-DD HH:MM:SS.)
"""
super(ListMembersInputSet, self)._set_input('Since', value)
def set_Start(self, value):
"""
Set the value of the Start input for this Choreo. ((optional, integer) Specifies the page at which to begin returning records. Page size is defined by the limit argument. Must be zero or greater. Defaults to 0.)
"""
super(ListMembersInputSet, self)._set_input('Start', value)
def set_Status(self, value):
"""
Set the value of the Status input for this Choreo. ((optional, string) Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'updated'. Defaults to 'subscribed'.)
"""
super(ListMembersInputSet, self)._set_input('Status', value)
class ListMembersResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListMembers Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Mailchimp. Corresponds to the format specified in the ResponseFormat parameter. Defaults to "xml".)
"""
return self._output.get('Response', None)
class ListMembersChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListMembersResultSet(response, path)
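
if __name__ == '__main__':
    # Illustrative usage sketch (not part of the generated bindings),
    # following the standard Temboo execute pattern; the account, app and
    # MailChimp values below are placeholders.
    from temboo.core.session import TembooSession
    session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
    choreo = ListMembers(session)
    inputs = choreo.new_input_set()
    inputs.set_APIKey('MAILCHIMP_API_KEY')
    inputs.set_ListId('LIST_ID')
    inputs.set_ResponseFormat('json')
    results = choreo.execute_with_results(inputs)
    print(results.get_Response())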
| jordanemedlock/psychtruths | temboo/core/Library/MailChimp/ListMembers.py | Python | apache-2.0 | 4,839 |
#!/usr/bin/python
# Copyright 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test that C files are compiled by C compiler
from BoostBuild import Tester, List
t = Tester()
t.write("project-root.jam", "")
t.write("Jamfile", """
project ;
exe hello : hello.cpp a.c ;
""")
t.write("hello.cpp", """
extern "C" int foo();
int main() { return foo(); }
""")
t.write("a.c", """
// This won't compile unless in C mode
int foo()
{
int new = 0;
new = (new+1)*7;
return new;
}
""")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/hello.exe")
t.cleanup()
| albmarvil/The-Eternal-Sorrow | dependencies/luabind/boost-build/test/c_file.py | Python | apache-2.0 | 691 |
import string
from django.conf import settings
# Default backend used to for get_connection() function.
DEFAULT_BACKEND = getattr(settings, 'BREVISURL_BACKEND', 'brevisurl.backends.local.BrevisUrlBackend')
# Domain that is used to create shortened urls.
LOCAL_BACKEND_DOMAIN = getattr(settings, 'BREVISURL_BACKEND_LOCAL_DOMAIN', None) or getattr(settings, 'BREVISURL_LOCAL_BACKEND_DOMAIN', None)
# Characters that are used to generate tokens for local backend.
LOCAL_BACKEND_TOKEN_CHARS = getattr(settings, 'BREVISURL_LOCAL_BACKEND_TOKEN_CHARS', list(string.ascii_letters + string.digits))
# Settings for token length.
LOCAL_BACKEND_TOKEN_LENGTH = getattr(settings, 'BREVISURL_LOCAL_BACKEND_TOKEN_LENGTH', 5)
# Settings for url max length.
LOCAL_BACKEND_ORIGINAL_URL_MAX_LENGTH = getattr(settings, 'SHORTURL_LOCAL_BACKEND_ORIGINAL_URL_MAX_LENGTH', 200)
# Settings for url pattern.
LOCAL_BACKEND_URL_PATTERN = getattr(settings, 'BREVISURL_LOCAL_BACKEND_URL_PATTERN',
r'^(?P<token>[a-zA-Z0-9]{' + str(LOCAL_BACKEND_TOKEN_LENGTH) + r'})$')
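# With the default token length of 5 this evaluates to
# r'^(?P<token>[a-zA-Z0-9]{5})$'.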
# Protocol for local backend.
LOCAL_BACKEND_DOMAIN_PROTOCOL = getattr(settings, 'BREVISURL_LOCAL_BACKEND_DOMAIN_PROTOCOL', 'http')
| Peach-Labs/django-brevisurl | brevisurl/settings.py | Python | bsd-3-clause | 1,212 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^index$', views.index, name='index'),
url(r'^signup/$', views.signup, name = 'signup'),
url(r'^map/$', views.map, name = 'map'),
url(r'^qrtest/$', views.qrtest, name = 'qrtest'),
url(r'^manage_priviliges/$', views.manage_priviliges, name ='manage_priviliges'),
]
| cliffpanos/True-Pass-iOS | CheckIn/pages/urls.py | Python | apache-2.0 | 403 |
#!/usr/bin/env python
import logging
from ina219 import INA219
SHUNT_OHMS = 0.1
MAX_EXPECTED_AMPS = 0.2
def read():
ina = INA219(SHUNT_OHMS, MAX_EXPECTED_AMPS, log_level=logging.INFO)
ina.configure(ina.RANGE_16V, ina.GAIN_AUTO)
print("Bus Voltage : %.3f V" % ina.voltage())
print("Bus Current : %.3f mA" % ina.current())
print("Supply Voltage : %.3f V" % ina.supply_voltage())
print("Shunt voltage : %.3f mV" % ina.shunt_voltage())
print("Power : %.3f mW" % ina.power())
if __name__ == "__main__":
read()
| chrisb2/pi_ina219 | example.py | Python | mit | 560 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Abstract base class of Port-specific entry points for the layout tests
test infrastructure (the Port and Driver classes)."""
import cgi
import difflib
import errno
import itertools
import logging
import os
import operator
import optparse
import re
import sys
try:
from collections import OrderedDict
except ImportError:
# Needed for Python < 2.7
from webkitpy.thirdparty.ordered_dict import OrderedDict
from webkitpy.common import find_files
from webkitpy.common import read_checksum_from_png
from webkitpy.common.memoized import memoized
from webkitpy.common.system import path
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.port import config as port_config
from webkitpy.port import driver
from webkitpy.port import http_lock
from webkitpy.port import image_diff
from webkitpy.port import server_process
from webkitpy.port.factory import PortFactory
from webkitpy.layout_tests.servers import apache_http_server
from webkitpy.layout_tests.servers import http_server
from webkitpy.layout_tests.servers import websocket_server
_log = logging.getLogger(__name__)
class Port(object):
"""Abstract class for Port-specific hooks for the layout_test package."""
# Subclasses override this. This should indicate the basic implementation
# part of the port name, e.g., 'win', 'gtk'; there is probably (?) one unique value per class.
# FIXME: We should probably rename this to something like 'implementation_name'.
port_name = None
# Test names resemble unix relative paths, and use '/' as a directory separator.
TEST_PATH_SEPARATOR = '/'
ALL_BUILD_TYPES = ('debug', 'release')
@classmethod
def determine_full_port_name(cls, host, options, port_name):
"""Return a fully-specified port name that can be used to construct objects."""
# Subclasses will usually override this.
options = options or {}
assert port_name.startswith(cls.port_name)
        if getattr(options, 'webkit_test_runner', False) and '-wk2' not in port_name:
return port_name + '-wk2'
return port_name
def __init__(self, host, port_name, options=None, **kwargs):
# This value may be different from cls.port_name by having version modifiers
# and other fields appended to it (for example, 'qt-arm' or 'mac-wk2').
self._name = port_name
# These are default values that should be overridden in a subclasses.
self._version = ''
self._architecture = 'x86'
# FIXME: Ideally we'd have a package-wide way to get a
# well-formed options object that had all of the necessary
# options defined on it.
self._options = options or optparse.Values()
if self._name and '-wk2' in self._name:
self._options.webkit_test_runner = True
self.host = host
self._executive = host.executive
self._filesystem = host.filesystem
self._webkit_finder = WebKitFinder(host.filesystem)
self._config = port_config.Config(self._executive, self._filesystem, self.port_name)
self._helper = None
self._http_server = None
self._websocket_server = None
self._image_differ = None
self._server_process_constructor = server_process.ServerProcess # overridable for testing
self._http_lock = None # FIXME: Why does this live on the port object?
# Python's Popen has a bug that causes any pipes opened to a
# process that can't be executed to be leaked. Since this
# code is specifically designed to tolerate exec failures
# to gracefully handle cases where wdiff is not installed,
# the bug results in a massive file descriptor leak. As a
# workaround, if an exec failure is ever experienced for
# wdiff, assume it's not available. This will leak one
# file descriptor but that's better than leaking each time
# wdiff would be run.
#
# http://mail.python.org/pipermail/python-list/
# 2008-August/505753.html
# http://bugs.python.org/issue3210
self._wdiff_available = None
# FIXME: prettypatch.py knows this path, why is it copied here?
self._pretty_patch_path = self.path_from_webkit_base("Websites", "bugs.webkit.org", "PrettyPatch", "prettify.rb")
self._pretty_patch_available = None
if not hasattr(options, 'configuration') or not options.configuration:
self.set_option_default('configuration', self.default_configuration())
self._test_configuration = None
self._reftest_list = {}
self._results_directory = None
self._root_was_set = hasattr(options, 'root') and options.root
def additional_drt_flag(self):
return []
def supports_per_test_timeout(self):
return False
def default_pixel_tests(self):
# FIXME: Disable until they are run by default on build.webkit.org.
return False
def default_timeout_ms(self):
if self.get_option('webkit_test_runner'):
# Add some more time to WebKitTestRunner because it needs to syncronise the state
# with the web process and we want to detect if there is a problem with that in the driver.
return 80 * 1000
return 35 * 1000
def driver_stop_timeout(self):
""" Returns the amount of time in seconds to wait before killing the process in driver.stop()."""
# We want to wait for at least 3 seconds, but if we are really slow, we want to be slow on cleanup as
# well (for things like ASAN, Valgrind, etc.)
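        # Worked example: with --time-out-ms=70000 on a WebKitTestRunner
        # port (default timeout 80000 ms) this returns 3.0 * 70000 / 80000,
        # i.e. 2.625 seconds.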
return 3.0 * float(self.get_option('time_out_ms', '0')) / self.default_timeout_ms()
def wdiff_available(self):
if self._wdiff_available is None:
self._wdiff_available = self.check_wdiff(logging=False)
return self._wdiff_available
def pretty_patch_available(self):
if self._pretty_patch_available is None:
self._pretty_patch_available = self.check_pretty_patch(logging=False)
return self._pretty_patch_available
def should_retry_crashes(self):
return False
def default_child_processes(self):
"""Return the number of DumpRenderTree instances to use for this port."""
return self._executive.cpu_count()
def default_max_locked_shards(self):
"""Return the number of "locked" shards to run in parallel (like the http tests)."""
return 1
def worker_startup_delay_secs(self):
# FIXME: If we start workers up too quickly, DumpRenderTree appears
# to thrash on something and time out its first few tests. Until
# we can figure out what's going on, sleep a bit in between
# workers. See https://bugs.webkit.org/show_bug.cgi?id=79147 .
return 0.1
def baseline_path(self):
"""Return the absolute path to the directory to store new baselines in for this port."""
# FIXME: remove once all callers are calling either baseline_version_dir() or baseline_platform_dir()
return self.baseline_version_dir()
def baseline_platform_dir(self):
"""Return the absolute path to the default (version-independent) platform-specific results."""
return self._filesystem.join(self.layout_tests_dir(), 'platform', self.port_name)
def baseline_version_dir(self):
"""Return the absolute path to the platform-and-version-specific results."""
baseline_search_paths = self.baseline_search_path()
return baseline_search_paths[0]
def baseline_search_path(self):
return self.get_option('additional_platform_directory', []) + self._compare_baseline() + self.default_baseline_search_path()
def default_baseline_search_path(self):
"""Return a list of absolute paths to directories to search under for
baselines. The directories are searched in order."""
search_paths = []
if self.get_option('webkit_test_runner'):
search_paths.append(self._wk2_port_name())
search_paths.append(self.name())
if self.name() != self.port_name:
search_paths.append(self.port_name)
return map(self._webkit_baseline_path, search_paths)
@memoized
def _compare_baseline(self):
factory = PortFactory(self.host)
target_port = self.get_option('compare_port')
if target_port:
return factory.get(target_port).default_baseline_search_path()
return []
def check_build(self, needs_http):
"""This routine is used to ensure that the build is up to date
and all the needed binaries are present."""
# If we're using a pre-built copy of WebKit (--root), we assume it also includes a build of DRT.
if not self._root_was_set and self.get_option('build') and not self._build_driver():
return False
if not self._check_driver():
return False
if self.get_option('pixel_tests'):
if not self.check_image_diff():
return False
if not self._check_port_build():
return False
return True
def _check_driver(self):
driver_path = self._path_to_driver()
if not self._filesystem.exists(driver_path):
_log.error("%s was not found at %s" % (self.driver_name(), driver_path))
return False
return True
def _check_port_build(self):
# Ports can override this method to do additional checks.
return True
def check_sys_deps(self, needs_http):
"""If the port needs to do some runtime checks to ensure that the
tests can be run successfully, it should override this routine.
This step can be skipped with --nocheck-sys-deps.
Returns whether the system is properly configured."""
if needs_http:
return self.check_httpd()
return True
def check_image_diff(self, override_step=None, logging=True):
"""This routine is used to check whether image_diff binary exists."""
image_diff_path = self._path_to_image_diff()
if not self._filesystem.exists(image_diff_path):
_log.error("ImageDiff was not found at %s" % image_diff_path)
return False
return True
def check_pretty_patch(self, logging=True):
"""Checks whether we can use the PrettyPatch ruby script."""
try:
_ = self._executive.run_command(['ruby', '--version'])
except OSError, e:
if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
if logging:
_log.warning("Ruby is not installed; can't generate pretty patches.")
_log.warning('')
return False
if not self._filesystem.exists(self._pretty_patch_path):
if logging:
_log.warning("Unable to find %s; can't generate pretty patches." % self._pretty_patch_path)
_log.warning('')
return False
return True
def check_wdiff(self, logging=True):
if not self._path_to_wdiff():
# Don't need to log here since this is the port choosing not to use wdiff.
return False
try:
_ = self._executive.run_command([self._path_to_wdiff(), '--help'])
except OSError:
if logging:
message = self._wdiff_missing_message()
if message:
for line in message.splitlines():
_log.warning(' ' + line)
_log.warning('')
return False
return True
def _wdiff_missing_message(self):
return 'wdiff is not installed; please install it to generate word-by-word diffs.'
def check_httpd(self):
if self._uses_apache():
httpd_path = self._path_to_apache()
else:
httpd_path = self._path_to_lighttpd()
try:
server_name = self._filesystem.basename(httpd_path)
env = self.setup_environ_for_server(server_name)
if self._executive.run_command([httpd_path, "-v"], env=env, return_exit_code=True) != 0:
_log.error("httpd seems broken. Cannot run http tests.")
return False
return True
except OSError:
_log.error("No httpd found. Cannot run http tests.")
return False
def do_text_results_differ(self, expected_text, actual_text):
return expected_text != actual_text
def do_audio_results_differ(self, expected_audio, actual_audio):
return expected_audio != actual_audio
def diff_image(self, expected_contents, actual_contents, tolerance=None):
"""Compare two images and return a tuple of an image diff, a percentage difference (0-100), and an error string.
|tolerance| should be a percentage value (0.0 - 100.0).
If it is omitted, the port default tolerance value is used.
If an error occurs (like ImageDiff isn't found, or crashes, we log an error and return True (for a diff).
"""
if not actual_contents and not expected_contents:
return (None, 0, None)
if not actual_contents or not expected_contents:
return (True, 0, None)
if not self._image_differ:
self._image_differ = image_diff.ImageDiffer(self)
self.set_option_default('tolerance', 0.1)
if tolerance is None:
tolerance = self.get_option('tolerance')
return self._image_differ.diff_image(expected_contents, actual_contents, tolerance)
def diff_text(self, expected_text, actual_text, expected_filename, actual_filename):
"""Returns a string containing the diff of the two text strings
in 'unified diff' format."""
# The filenames show up in the diff output, make sure they're
# raw bytes and not unicode, so that they don't trigger join()
# trying to decode the input.
def to_raw_bytes(string_value):
if isinstance(string_value, unicode):
return string_value.encode('utf-8')
return string_value
expected_filename = to_raw_bytes(expected_filename)
actual_filename = to_raw_bytes(actual_filename)
diff = difflib.unified_diff(expected_text.splitlines(True),
actual_text.splitlines(True),
expected_filename,
actual_filename)
return ''.join(diff)
def check_for_leaks(self, process_name, process_pid):
# Subclasses should check for leaks in the running process
# and print any necessary warnings if leaks are found.
# FIXME: We should consider moving much of this logic into
# Executive and make it platform-specific instead of port-specific.
pass
def print_leaks_summary(self):
# Subclasses can override this to print a summary of leaks found
# while running the layout tests.
pass
def driver_name(self):
if self.get_option('driver_name'):
return self.get_option('driver_name')
if self.get_option('webkit_test_runner'):
return 'WebKitTestRunner'
return 'DumpRenderTree'
def expected_baselines_by_extension(self, test_name):
"""Returns a dict mapping baseline suffix to relative path for each baseline in
a test. For reftests, it returns ".==" or ".!=" instead of the suffix."""
# FIXME: The name similarity between this and expected_baselines() below, is unfortunate.
# We should probably rename them both.
baseline_dict = {}
reference_files = self.reference_files(test_name)
if reference_files:
# FIXME: How should this handle more than one type of reftest?
baseline_dict['.' + reference_files[0][0]] = self.relative_test_filename(reference_files[0][1])
for extension in self.baseline_extensions():
path = self.expected_filename(test_name, extension, return_default=False)
baseline_dict[extension] = self.relative_test_filename(path) if path else path
return baseline_dict
def baseline_extensions(self):
"""Returns a tuple of all of the non-reftest baseline extensions we use. The extensions include the leading '.'."""
return ('.wav', '.webarchive', '.txt', '.png')
def expected_baselines(self, test_name, suffix, all_baselines=False):
"""Given a test name, finds where the baseline results are located.
Args:
test_name: name of test file (usually a relative path under LayoutTests/)
suffix: file suffix of the expected results, including dot; e.g.
'.txt' or '.png'. This should not be None, but may be an empty
string.
all_baselines: If True, return an ordered list of all baseline paths
for the given platform. If False, return only the first one.
Returns
a list of ( platform_dir, results_filename ), where
platform_dir - abs path to the top of the results tree (or test
tree)
results_filename - relative path from top of tree to the results
file
(port.join() of the two gives you the full path to the file,
unless None was returned.)
Return values will be in the format appropriate for the current
platform (e.g., "\\" for path separators on Windows). If the results
file is not found, then None will be returned for the directory,
but the expected relative pathname will still be returned.
This routine is generic but lives here since it is used in
conjunction with the other baseline and filename routines that are
platform specific.
"""
baseline_filename = self._filesystem.splitext(test_name)[0] + '-expected' + suffix
baseline_search_path = self.baseline_search_path()
baselines = []
for platform_dir in baseline_search_path:
if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
baselines.append((platform_dir, baseline_filename))
if not all_baselines and baselines:
return baselines
# If it wasn't found in a platform directory, return the expected
# result in the test directory, even if no such file actually exists.
platform_dir = self.layout_tests_dir()
if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
baselines.append((platform_dir, baseline_filename))
if baselines:
return baselines
return [(None, baseline_filename)]
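    # Illustrative sketch (hypothetical paths, not in the original source): for a
    # port whose baseline_search_path() is ['/wk/LayoutTests/platform/mac'],
    # expected_baselines('fast/dom/test.html', '.txt') returns
    #   [('/wk/LayoutTests/platform/mac', 'fast/dom/test-expected.txt')]
    # when that platform baseline exists, and
    #   [(None, 'fast/dom/test-expected.txt')]
    # when no baseline is found anywhere.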
def expected_filename(self, test_name, suffix, return_default=True):
"""Given a test name, returns an absolute path to its expected results.
If no expected results are found in any of the searched directories,
the directory in which the test itself is located will be returned.
The return value is in the format appropriate for the platform
(e.g., "\\" for path separators on windows).
Args:
test_name: name of test file (usually a relative path under LayoutTests/)
suffix: file suffix of the expected results, including dot; e.g. '.txt'
or '.png'. This should not be None, but may be an empty string.
return_default: if True, returns the path to the generic expectation if nothing
else is found; if False, returns None.
This routine is generic but is implemented here to live alongside
the other baseline and filename manipulation routines.
"""
# FIXME: The [0] here is very mysterious, as is the destructured return.
platform_dir, baseline_filename = self.expected_baselines(test_name, suffix)[0]
if platform_dir:
return self._filesystem.join(platform_dir, baseline_filename)
actual_test_name = self.lookup_virtual_test_base(test_name)
if actual_test_name:
return self.expected_filename(actual_test_name, suffix)
if return_default:
return self._filesystem.join(self.layout_tests_dir(), baseline_filename)
return None
def expected_checksum(self, test_name):
"""Returns the checksum of the image we expect the test to produce, or None if it is a text-only test."""
png_path = self.expected_filename(test_name, '.png')
if self._filesystem.exists(png_path):
with self._filesystem.open_binary_file_for_reading(png_path) as filehandle:
return read_checksum_from_png.read_checksum(filehandle)
return None
def expected_image(self, test_name):
"""Returns the image we expect the test to produce."""
baseline_path = self.expected_filename(test_name, '.png')
if not self._filesystem.exists(baseline_path):
return None
return self._filesystem.read_binary_file(baseline_path)
def expected_audio(self, test_name):
baseline_path = self.expected_filename(test_name, '.wav')
if not self._filesystem.exists(baseline_path):
return None
return self._filesystem.read_binary_file(baseline_path)
def expected_text(self, test_name):
"""Returns the text output we expect the test to produce, or None
if we don't expect there to be any text output.
End-of-line characters are normalized to '\n'."""
# FIXME: DRT output is actually utf-8, but since we don't decode the
# output from DRT (instead treating it as a binary string), we read the
# baselines as a binary string, too.
baseline_path = self.expected_filename(test_name, '.txt')
if not self._filesystem.exists(baseline_path):
baseline_path = self.expected_filename(test_name, '.webarchive')
if not self._filesystem.exists(baseline_path):
return None
text = self._filesystem.read_binary_file(baseline_path)
return text.replace("\r\n", "\n")
def _get_reftest_list(self, test_name):
dirname = self._filesystem.join(self.layout_tests_dir(), self._filesystem.dirname(test_name))
if dirname not in self._reftest_list:
self._reftest_list[dirname] = Port._parse_reftest_list(self._filesystem, dirname)
return self._reftest_list[dirname]
@staticmethod
def _parse_reftest_list(filesystem, test_dirpath):
reftest_list_path = filesystem.join(test_dirpath, 'reftest.list')
if not filesystem.isfile(reftest_list_path):
return None
reftest_list_file = filesystem.read_text_file(reftest_list_path)
parsed_list = {}
for line in reftest_list_file.split('\n'):
line = re.sub('#.+$', '', line)
split_line = line.split()
if len(split_line) < 3:
continue
expectation_type, test_file, ref_file = split_line
parsed_list.setdefault(filesystem.join(test_dirpath, test_file), []).append((expectation_type, filesystem.join(test_dirpath, ref_file)))
return parsed_list
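    # Illustrative sketch (hypothetical file contents): a reftest.list line such as
    #   == foo.html foo-ref.html
    # in LayoutTests/fast/ is parsed into
    #   {'.../fast/foo.html': [('==', '.../fast/foo-ref.html')]}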
def reference_files(self, test_name):
"""Return a list of expectation (== or !=) and filename pairs"""
reftest_list = self._get_reftest_list(test_name)
if not reftest_list:
reftest_list = []
for expectation, prefix in (('==', ''), ('!=', '-mismatch')):
                for extension in Port._supported_reference_extensions:
                    path = self.expected_filename(test_name, prefix + extension)
if self._filesystem.exists(path):
reftest_list.append((expectation, path))
return reftest_list
return reftest_list.get(self._filesystem.join(self.layout_tests_dir(), test_name), []) # pylint: disable=E1103
def tests(self, paths):
"""Return the list of tests found. Both generic and platform-specific tests matching paths should be returned."""
expanded_paths = self._expanded_paths(paths)
tests = self._real_tests(expanded_paths)
tests.extend(self._virtual_tests(expanded_paths, self.populated_virtual_test_suites()))
return tests
def _expanded_paths(self, paths):
expanded_paths = []
fs = self._filesystem
all_platform_dirs = [path for path in fs.glob(fs.join(self.layout_tests_dir(), 'platform', '*')) if fs.isdir(path)]
for path in paths:
expanded_paths.append(path)
if self.test_isdir(path) and not path.startswith('platform'):
for platform_dir in all_platform_dirs:
if fs.isdir(fs.join(platform_dir, path)) and platform_dir in self.baseline_search_path():
expanded_paths.append(self.relative_test_filename(fs.join(platform_dir, path)))
return expanded_paths
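    # Illustrative sketch (hypothetical layout): expanding ['fast/forms'] also adds
    # 'platform/mac/fast/forms' when that directory exists and platform/mac is on
    # this port's baseline_search_path().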
def _real_tests(self, paths):
# When collecting test cases, skip these directories
skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests', 'reference', 'reftest'])
files = find_files.find(self._filesystem, self.layout_tests_dir(), paths, skipped_directories, Port._is_test_file, self.test_key)
return [self.relative_test_filename(f) for f in files]
# When collecting test cases, we include any file with these extensions.
_supported_test_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl', '.htm', '.php', '.svg', '.mht', '.xht'])
_supported_reference_extensions = set(['.html', '.xml', '.xhtml', '.htm', '.svg', '.xht'])
@staticmethod
# If any changes are made here be sure to update the isUsedInReftest method in old-run-webkit-tests as well.
def is_reference_html_file(filesystem, dirname, filename):
if filename.startswith('ref-') or filename.startswith('notref-'):
return True
        filename_without_ext, ext = filesystem.splitext(filename)
if ext not in Port._supported_reference_extensions:
return False
for suffix in ['-expected', '-expected-mismatch', '-ref', '-notref']:
            if filename_without_ext.endswith(suffix):
return True
return False
@staticmethod
def _has_supported_extension(filesystem, filename):
"""Return true if filename is one of the file extensions we want to run a test on."""
extension = filesystem.splitext(filename)[1]
return extension in Port._supported_test_extensions
@staticmethod
def _is_test_file(filesystem, dirname, filename):
return Port._has_supported_extension(filesystem, filename) and not Port.is_reference_html_file(filesystem, dirname, filename)
def test_key(self, test_name):
"""Turns a test name into a list with two sublists, the natural key of the
dirname, and the natural key of the basename.
        This can be used when sorting paths so that files in a directory
        are kept together rather than being mixed in with files in
subdirectories."""
dirname, basename = self.split_test(test_name)
return (self._natural_sort_key(dirname + self.TEST_PATH_SEPARATOR), self._natural_sort_key(basename))
def _natural_sort_key(self, string_to_split):
""" Turns a string into a list of string and number chunks, i.e. "z23a" -> ["z", 23, "a"]
This can be used to implement "natural sort" order. See:
http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
http://nedbatchelder.com/blog/200712.html#e20071211T054956
"""
def tryint(val):
try:
return int(val)
except ValueError:
return val
return [tryint(chunk) for chunk in re.split('(\d+)', string_to_split)]
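    # Illustrative sketch (not in the original source):
    #   sorted(['test10.html', 'test2.html'], key=port._natural_sort_key)
    # places 'test2.html' before 'test10.html' because the numeric chunks
    # compare as integers (2 < 10) instead of as strings.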
def test_dirs(self):
"""Returns the list of top-level test directories."""
layout_tests_dir = self.layout_tests_dir()
return filter(lambda x: self._filesystem.isdir(self._filesystem.join(layout_tests_dir, x)),
self._filesystem.listdir(layout_tests_dir))
@memoized
def test_isfile(self, test_name):
"""Return True if the test name refers to a directory of tests."""
# Used by test_expectations.py to apply rules to whole directories.
if self._filesystem.isfile(self.abspath_for_test(test_name)):
return True
base = self.lookup_virtual_test_base(test_name)
return base and self._filesystem.isfile(self.abspath_for_test(base))
@memoized
def test_isdir(self, test_name):
"""Return True if the test name refers to a directory of tests."""
# Used by test_expectations.py to apply rules to whole directories.
if self._filesystem.isdir(self.abspath_for_test(test_name)):
return True
base = self.lookup_virtual_test_base(test_name)
return base and self._filesystem.isdir(self.abspath_for_test(base))
@memoized
def test_exists(self, test_name):
"""Return True if the test name refers to an existing test or baseline."""
# Used by test_expectations.py to determine if an entry refers to a
# valid test and by printing.py to determine if baselines exist.
return self.test_isfile(test_name) or self.test_isdir(test_name)
def split_test(self, test_name):
"""Splits a test name into the 'directory' part and the 'basename' part."""
index = test_name.rfind(self.TEST_PATH_SEPARATOR)
if index < 1:
return ('', test_name)
return (test_name[0:index], test_name[index:])
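    # Illustrative sketch: split_test('fast/dom/test.html') returns
    # ('fast/dom', '/test.html'); note the basename keeps the leading separator,
    # a consequence of the rfind()-based slicing above.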
def normalize_test_name(self, test_name):
"""Returns a normalized version of the test name or test directory."""
if test_name.endswith('/'):
return test_name
if self.test_isdir(test_name):
return test_name + '/'
return test_name
def driver_cmd_line(self):
"""Prints the DRT command line that will be used."""
driver = self.create_driver(0)
return driver.cmd_line(self.get_option('pixel_tests'), [])
def update_baseline(self, baseline_path, data):
"""Updates the baseline for a test.
Args:
baseline_path: the actual path to use for baseline, not the path to
the test. This function is used to update either generic or
platform-specific baselines, but we can't infer which here.
data: contents of the baseline.
"""
self._filesystem.write_binary_file(baseline_path, data)
# FIXME: update callers to create a finder and call it instead of these next five routines (which should be protected).
def webkit_base(self):
return self._webkit_finder.webkit_base()
def path_from_webkit_base(self, *comps):
return self._webkit_finder.path_from_webkit_base(*comps)
def path_to_script(self, script_name):
return self._webkit_finder.path_to_script(script_name)
def layout_tests_dir(self):
return self._webkit_finder.layout_tests_dir()
def perf_tests_dir(self):
return self._webkit_finder.perf_tests_dir()
def skipped_layout_tests(self, test_list):
"""Returns tests skipped outside of the TestExpectations files."""
return set(self._tests_for_other_platforms()).union(self._skipped_tests_for_unsupported_features(test_list))
def _tests_from_skipped_file_contents(self, skipped_file_contents):
tests_to_skip = []
for line in skipped_file_contents.split('\n'):
line = line.strip()
line = line.rstrip('/') # Best to normalize directory names to not include the trailing slash.
if line.startswith('#') or not len(line):
continue
tests_to_skip.append(line)
return tests_to_skip
def _expectations_from_skipped_files(self, skipped_file_paths):
tests_to_skip = []
for search_path in skipped_file_paths:
filename = self._filesystem.join(self._webkit_baseline_path(search_path), "Skipped")
if not self._filesystem.exists(filename):
_log.debug("Skipped does not exist: %s" % filename)
continue
_log.debug("Using Skipped file: %s" % filename)
skipped_file_contents = self._filesystem.read_text_file(filename)
tests_to_skip.extend(self._tests_from_skipped_file_contents(skipped_file_contents))
return tests_to_skip
@memoized
def skipped_perf_tests(self):
return self._expectations_from_skipped_files([self.perf_tests_dir()])
def skips_perf_test(self, test_name):
for test_or_category in self.skipped_perf_tests():
if test_or_category == test_name:
return True
category = self._filesystem.join(self.perf_tests_dir(), test_or_category)
if self._filesystem.isdir(category) and test_name.startswith(test_or_category):
return True
return False
def name(self):
"""Returns a name that uniquely identifies this particular type of port
(e.g., "mac-snowleopard" or "chromium-linux-x86_x64" and can be passed
to factory.get() to instantiate the port."""
return self._name
def operating_system(self):
# Subclasses should override this default implementation.
return 'mac'
def version(self):
"""Returns a string indicating the version of a given platform, e.g.
'leopard' or 'xp'.
This is used to help identify the exact port when parsing test
expectations, determining search paths, and logging information."""
return self._version
def architecture(self):
return self._architecture
def get_option(self, name, default_value=None):
return getattr(self._options, name, default_value)
def set_option_default(self, name, default_value):
return self._options.ensure_value(name, default_value)
@memoized
def path_to_generic_test_expectations_file(self):
return self._filesystem.join(self.layout_tests_dir(), 'TestExpectations')
@memoized
def path_to_test_expectations_file(self):
"""Update the test expectations to the passed-in string.
This is used by the rebaselining tool. Raises NotImplementedError
if the port does not use expectations files."""
# FIXME: We need to remove this when we make rebaselining work with multiple files and just generalize expectations_files().
# test_expectations are always in mac/ not mac-leopard/ by convention, hence we use port_name instead of name().
return self._filesystem.join(self._webkit_baseline_path(self.port_name), 'TestExpectations')
def relative_test_filename(self, filename):
"""Returns a test_name a relative unix-style path for a filename under the LayoutTests
directory. Ports may legitimately return abspaths here if no relpath makes sense."""
# Ports that run on windows need to override this method to deal with
# filenames with backslashes in them.
if filename.startswith(self.layout_tests_dir()):
return self.host.filesystem.relpath(filename, self.layout_tests_dir())
else:
return self.host.filesystem.abspath(filename)
@memoized
def abspath_for_test(self, test_name):
"""Returns the full path to the file for a given test name. This is the
inverse of relative_test_filename()."""
return self._filesystem.join(self.layout_tests_dir(), test_name)
def results_directory(self):
"""Absolute path to the place to store the test results (uses --results-directory)."""
if not self._results_directory:
option_val = self.get_option('results_directory') or self.default_results_directory()
self._results_directory = self._filesystem.abspath(option_val)
return self._results_directory
def perf_results_directory(self):
return self._build_path()
def default_results_directory(self):
"""Absolute path to the default place to store the test results."""
        # Results are stored relative to the built products to make it easy
# to have multiple copies of webkit checked out and built.
return self._build_path('layout-test-results')
def setup_test_run(self):
"""Perform port-specific work at the beginning of a test run."""
pass
def clean_up_test_run(self):
"""Perform port-specific work at the end of a test run."""
if self._image_differ:
self._image_differ.stop()
self._image_differ = None
# FIXME: os.environ access should be moved to onto a common/system class to be more easily mockable.
def _value_or_default_from_environ(self, name, default=None):
if name in os.environ:
return os.environ[name]
return default
def _copy_value_from_environ_if_set(self, clean_env, name):
if name in os.environ:
clean_env[name] = os.environ[name]
def setup_environ_for_server(self, server_name=None):
# We intentionally copy only a subset of os.environ when
# launching subprocesses to ensure consistent test results.
clean_env = {}
variables_to_copy = [
# For Linux:
'XAUTHORITY',
'HOME',
'LANG',
'LD_LIBRARY_PATH',
'DBUS_SESSION_BUS_ADDRESS',
'XDG_DATA_DIRS',
'XDG_RUNTIME_DIR',
# Darwin:
'DYLD_LIBRARY_PATH',
'HOME',
# CYGWIN:
'HOMEDRIVE',
'HOMEPATH',
'_NT_SYMBOL_PATH',
# Windows:
'PATH',
# Most ports (?):
'WEBKIT_TESTFONTS',
'WEBKIT_OUTPUTDIR',
# Chromium:
'CHROME_DEVEL_SANDBOX',
]
for variable in variables_to_copy:
self._copy_value_from_environ_if_set(clean_env, variable)
# For Linux:
clean_env['DISPLAY'] = self._value_or_default_from_environ('DISPLAY', ':1')
for string_variable in self.get_option('additional_env_var', []):
[name, value] = string_variable.split('=', 1)
clean_env[name] = value
return clean_env
def show_results_html_file(self, results_filename):
"""This routine should display the HTML file pointed at by
        results_filename in the user's browser."""
return self.host.user.open_url(path.abspath_to_uri(self.host.platform, results_filename))
def create_driver(self, worker_number, no_timeout=False):
"""Return a newly created Driver subclass for starting/stopping the test driver."""
return driver.DriverProxy(self, worker_number, self._driver_class(), pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout)
def start_helper(self):
"""If a port needs to reconfigure graphics settings or do other
things to ensure a known test configuration, it should override this
method."""
pass
def start_http_server(self, additional_dirs=None, number_of_servers=None):
"""Start a web server. Raise an error if it can't start or is already running.
Ports can stub this out if they don't need a web server to be running."""
assert not self._http_server, 'Already running an http server.'
if self._uses_apache():
server = apache_http_server.LayoutTestApacheHttpd(self, self.results_directory(), additional_dirs=additional_dirs, number_of_servers=number_of_servers)
else:
server = http_server.Lighttpd(self, self.results_directory(), additional_dirs=additional_dirs, number_of_servers=number_of_servers)
server.start()
self._http_server = server
def start_websocket_server(self):
"""Start a web server. Raise an error if it can't start or is already running.
Ports can stub this out if they don't need a websocket server to be running."""
assert not self._websocket_server, 'Already running a websocket server.'
server = websocket_server.PyWebSocket(self, self.results_directory())
server.start()
self._websocket_server = server
def http_server_supports_ipv6(self):
        # Cygwin is the only platform to still use Apache 1.3, which only supports IPv4.
# Once it moves to Apache 2, we can drop this method altogether.
if self.host.platform.is_cygwin():
return False
return True
def acquire_http_lock(self):
self._http_lock = http_lock.HttpLock(None, filesystem=self._filesystem, executive=self._executive)
self._http_lock.wait_for_httpd_lock()
def stop_helper(self):
"""Shut down the test helper if it is running. Do nothing if
it isn't, or it isn't available. If a port overrides start_helper()
it must override this routine as well."""
pass
def stop_http_server(self):
"""Shut down the http server if it is running. Do nothing if it isn't."""
if self._http_server:
self._http_server.stop()
self._http_server = None
def stop_websocket_server(self):
"""Shut down the websocket server if it is running. Do nothing if it isn't."""
if self._websocket_server:
self._websocket_server.stop()
self._websocket_server = None
def release_http_lock(self):
if self._http_lock:
self._http_lock.cleanup_http_lock()
def exit_code_from_summarized_results(self, unexpected_results):
"""Given summarized results, compute the exit code to be returned by new-run-webkit-tests.
Bots turn red when this function returns a non-zero value. By default, return the number of regressions
to avoid turning bots red by flaky failures, unexpected passes, and missing results"""
# Don't turn bots red for flaky failures, unexpected passes, and missing results.
return unexpected_results['num_regressions']
#
# TEST EXPECTATION-RELATED METHODS
#
def test_configuration(self):
"""Returns the current TestConfiguration for the port."""
if not self._test_configuration:
self._test_configuration = TestConfiguration(self._version, self._architecture, self._options.configuration.lower())
return self._test_configuration
# FIXME: Belongs on a Platform object.
@memoized
def all_test_configurations(self):
"""Returns a list of TestConfiguration instances, representing all available
test configurations for this port."""
return self._generate_all_test_configurations()
# FIXME: Belongs on a Platform object.
def configuration_specifier_macros(self):
"""Ports may provide a way to abbreviate configuration specifiers to conveniently
refer to them as one term or alias specific values to more generic ones. For example:
(xp, vista, win7) -> win # Abbreviate all Windows versions into one namesake.
(lucid) -> linux # Change specific name of the Linux distro to a more generic term.
Returns a dictionary, each key representing a macro term ('win', for example),
and value being a list of valid configuration specifiers (such as ['xp', 'vista', 'win7'])."""
return {}
def all_baseline_variants(self):
"""Returns a list of platform names sufficient to cover all the baselines.
The list should be sorted so that a later platform will reuse
an earlier platform's baselines if they are the same (e.g.,
'snowleopard' should precede 'leopard')."""
raise NotImplementedError
def uses_test_expectations_file(self):
# This is different from checking test_expectations() is None, because
# some ports have Skipped files which are returned as part of test_expectations().
return self._filesystem.exists(self.path_to_test_expectations_file())
def warn_if_bug_missing_in_test_expectations(self):
return False
def expectations_dict(self):
"""Returns an OrderedDict of name -> expectations strings.
The names are expected to be (but not required to be) paths in the filesystem.
If the name is a path, the file can be considered updatable for things like rebaselining,
so don't use names that are paths if they're not paths.
Generally speaking the ordering should be files in the filesystem in cascade order
(TestExpectations followed by Skipped, if the port honors both formats),
then any built-in expectations (e.g., from compile-time exclusions), then --additional-expectations options."""
# FIXME: rename this to test_expectations() once all the callers are updated to know about the ordered dict.
expectations = OrderedDict()
for path in self.expectations_files():
if self._filesystem.exists(path):
expectations[path] = self._filesystem.read_text_file(path)
for path in self.get_option('additional_expectations', []):
expanded_path = self._filesystem.expanduser(path)
if self._filesystem.exists(expanded_path):
_log.debug("reading additional_expectations from path '%s'" % path)
expectations[path] = self._filesystem.read_text_file(expanded_path)
else:
_log.warning("additional_expectations path '%s' does not exist" % path)
return expectations
def _port_specific_expectations_files(self):
# Unlike baseline_search_path, we only want to search [WK2-PORT, PORT-VERSION, PORT] and any directories
        # included via --additional-platform-directory, not the full cascade.
search_paths = [self.port_name]
non_wk2_name = self.name().replace('-wk2', '')
if non_wk2_name != self.port_name:
search_paths.append(non_wk2_name)
if self.get_option('webkit_test_runner'):
# Because nearly all of the skipped tests for WebKit 2 are due to cross-platform
# issues, all wk2 ports share a skipped list under platform/wk2.
search_paths.extend(["wk2", self._wk2_port_name()])
search_paths.extend(self.get_option("additional_platform_directory", []))
return [self._filesystem.join(self._webkit_baseline_path(d), 'TestExpectations') for d in search_paths]
def expectations_files(self):
return [self.path_to_generic_test_expectations_file()] + self._port_specific_expectations_files()
def repository_paths(self):
"""Returns a list of (repository_name, repository_path) tuples of its depending code base.
By default it returns a list that only contains a ('WebKit', <webkitRepositoryPath>) tuple."""
        # We use the LayoutTests directory here because webkit_base isn't part of the WebKit
        # repository in the Chromium port, where trunk isn't checked out as a whole.
return [('WebKit', self.layout_tests_dir())]
_WDIFF_DEL = '##WDIFF_DEL##'
_WDIFF_ADD = '##WDIFF_ADD##'
_WDIFF_END = '##WDIFF_END##'
def _format_wdiff_output_as_html(self, wdiff):
wdiff = cgi.escape(wdiff)
wdiff = wdiff.replace(self._WDIFF_DEL, "<span class=del>")
wdiff = wdiff.replace(self._WDIFF_ADD, "<span class=add>")
wdiff = wdiff.replace(self._WDIFF_END, "</span>")
html = "<head><style>.del { background: #faa; } "
html += ".add { background: #afa; }</style></head>"
html += "<pre>%s</pre>" % wdiff
return html
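    # Illustrative sketch: an input of '##WDIFF_DEL##old##WDIFF_END## ##WDIFF_ADD##new##WDIFF_END##'
    # becomes '<span class=del>old</span> <span class=add>new</span>' inside the <pre> block.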
def _wdiff_command(self, actual_filename, expected_filename):
executable = self._path_to_wdiff()
return [executable,
"--start-delete=%s" % self._WDIFF_DEL,
"--end-delete=%s" % self._WDIFF_END,
"--start-insert=%s" % self._WDIFF_ADD,
"--end-insert=%s" % self._WDIFF_END,
actual_filename,
expected_filename]
@staticmethod
def _handle_wdiff_error(script_error):
# Exit 1 means the files differed, any other exit code is an error.
if script_error.exit_code != 1:
raise script_error
def _run_wdiff(self, actual_filename, expected_filename):
"""Runs wdiff and may throw exceptions.
This is mostly a hook for unit testing."""
# Diffs are treated as binary as they may include multiple files
# with conflicting encodings. Thus we do not decode the output.
command = self._wdiff_command(actual_filename, expected_filename)
wdiff = self._executive.run_command(command, decode_output=False,
error_handler=self._handle_wdiff_error)
return self._format_wdiff_output_as_html(wdiff)
def wdiff_text(self, actual_filename, expected_filename):
"""Returns a string of HTML indicating the word-level diff of the
contents of the two filenames. Returns an empty string if word-level
diffing isn't available."""
if not self.wdiff_available():
return ""
try:
            # It's possible to raise a ScriptError if we pass wdiff invalid paths.
return self._run_wdiff(actual_filename, expected_filename)
except OSError, e:
if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
# Silently ignore cases where wdiff is missing.
self._wdiff_available = False
return ""
raise
# This is a class variable so we can test error output easily.
_pretty_patch_error_html = "Failed to run PrettyPatch, see error log."
def pretty_patch_text(self, diff_path):
if self._pretty_patch_available is None:
self._pretty_patch_available = self.check_pretty_patch(logging=False)
if not self._pretty_patch_available:
return self._pretty_patch_error_html
command = ("ruby", "-I", self._filesystem.dirname(self._pretty_patch_path),
self._pretty_patch_path, diff_path)
try:
# Diffs are treated as binary (we pass decode_output=False) as they
# may contain multiple files of conflicting encodings.
return self._executive.run_command(command, decode_output=False)
except OSError, e:
# If the system is missing ruby log the error and stop trying.
self._pretty_patch_available = False
_log.error("Failed to run PrettyPatch (%s): %s" % (command, e))
return self._pretty_patch_error_html
except ScriptError, e:
# If ruby failed to run for some reason, log the command
# output and stop trying.
self._pretty_patch_available = False
_log.error("Failed to run PrettyPatch (%s):\n%s" % (command, e.message_with_output()))
return self._pretty_patch_error_html
def default_configuration(self):
return self._config.default_configuration()
#
# PROTECTED ROUTINES
#
# The routines below should only be called by routines in this class
# or any of its subclasses.
#
def _uses_apache(self):
return True
# FIXME: This does not belong on the port object.
@memoized
def _path_to_apache(self):
"""Returns the full path to the apache binary.
This is needed only by ports that use the apache_http_server module."""
# The Apache binary path can vary depending on OS and distribution
# See http://wiki.apache.org/httpd/DistrosDefaultLayout
for path in ["/usr/sbin/httpd", "/usr/sbin/apache2"]:
if self._filesystem.exists(path):
return path
_log.error("Could not find apache. Not installed or unknown path.")
return None
# FIXME: This belongs on some platform abstraction instead of Port.
def _is_redhat_based(self):
return self._filesystem.exists('/etc/redhat-release')
def _is_debian_based(self):
return self._filesystem.exists('/etc/debian_version')
def _is_arch_based(self):
return self._filesystem.exists('/etc/arch-release')
def _apache_version(self):
config = self._executive.run_command([self._path_to_apache(), '-v'])
return re.sub(r'(?:.|\n)*Server version: Apache/(\d+\.\d+)(?:.|\n)*', r'\1', config)
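    # Illustrative sketch (hypothetical output): if 'httpd -v' prints
    #   Server version: Apache/2.4.41 (Ubuntu)
    # the substitution above reduces the whole output to '2.4'.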
# We pass sys_platform into this method to make it easy to unit test.
def _apache_config_file_name_for_platform(self, sys_platform):
if sys_platform == 'cygwin':
return 'cygwin-httpd.conf' # CYGWIN is the only platform to still use Apache 1.3.
if sys_platform.startswith('linux'):
if self._is_redhat_based():
return 'fedora-httpd-' + self._apache_version() + '.conf'
if self._is_debian_based():
return 'debian-httpd-' + self._apache_version() + '.conf'
if self._is_arch_based():
return 'archlinux-httpd.conf'
# All platforms use apache2 except for CYGWIN (and Mac OS X Tiger and prior, which we no longer support).
return "apache2-httpd.conf"
def _path_to_apache_config_file(self):
"""Returns the full path to the apache configuration file.
If the WEBKIT_HTTP_SERVER_CONF_PATH environment variable is set, its
contents will be used instead.
This is needed only by ports that use the apache_http_server module."""
config_file_from_env = os.environ.get('WEBKIT_HTTP_SERVER_CONF_PATH')
if config_file_from_env:
if not self._filesystem.exists(config_file_from_env):
raise IOError('%s was not found on the system' % config_file_from_env)
return config_file_from_env
config_file_name = self._apache_config_file_name_for_platform(sys.platform)
return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', config_file_name)
def _build_path(self, *comps):
root_directory = self.get_option('root')
if not root_directory:
build_directory = self.get_option('build_directory')
if build_directory:
root_directory = self._filesystem.join(build_directory, self.get_option('configuration'))
else:
root_directory = self._config.build_directory(self.get_option('configuration'))
# Set --root so that we can pass this to subprocesses and avoid making the
# slow call to config.build_directory() N times in each worker.
# FIXME: This is like @memoized, but more annoying and fragile; there should be another
# way to propagate values without mutating the options list.
self.set_option_default('root', root_directory)
return self._filesystem.join(self._filesystem.abspath(root_directory), *comps)
def _path_to_driver(self, configuration=None):
"""Returns the full path to the test driver (DumpRenderTree)."""
return self._build_path(self.driver_name())
def _driver_tempdir(self):
return self._filesystem.mkdtemp(prefix='%s-' % self.driver_name())
def _driver_tempdir_for_environment(self):
return self._driver_tempdir()
def _path_to_webcore_library(self):
"""Returns the full path to a built copy of WebCore."""
return None
def _path_to_helper(self):
"""Returns the full path to the layout_test_helper binary, which
is used to help configure the system for the test run, or None
if no helper is needed.
This is likely only used by start/stop_helper()."""
return None
def _path_to_image_diff(self):
"""Returns the full path to the image_diff binary, or None if it is not available.
This is likely used only by diff_image()"""
return self._build_path('ImageDiff')
def _path_to_lighttpd(self):
"""Returns the path to the LigHTTPd binary.
This is needed only by ports that use the http_server.py module."""
raise NotImplementedError('Port._path_to_lighttpd')
def _path_to_lighttpd_modules(self):
"""Returns the path to the LigHTTPd modules directory.
This is needed only by ports that use the http_server.py module."""
raise NotImplementedError('Port._path_to_lighttpd_modules')
def _path_to_lighttpd_php(self):
"""Returns the path to the LigHTTPd PHP executable.
This is needed only by ports that use the http_server.py module."""
raise NotImplementedError('Port._path_to_lighttpd_php')
@memoized
def _path_to_wdiff(self):
"""Returns the full path to the wdiff binary, or None if it is not available.
This is likely used only by wdiff_text()"""
for path in ("/usr/bin/wdiff", "/usr/bin/dwdiff"):
if self._filesystem.exists(path):
return path
return None
def _webkit_baseline_path(self, platform):
"""Return the full path to the top of the baseline tree for a
given platform."""
return self._filesystem.join(self.layout_tests_dir(), 'platform', platform)
# FIXME: Belongs on a Platform object.
def _generate_all_test_configurations(self):
"""Generates a list of TestConfiguration instances, representing configurations
for a platform across all OSes, architectures, build and graphics types."""
raise NotImplementedError('Port._generate_test_configurations')
def _driver_class(self):
"""Returns the port's driver implementation."""
return driver.Driver
def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
name_str = name or '<unknown process name>'
pid_str = str(pid or '<unknown>')
stdout_lines = (stdout or '<empty>').decode('utf8', 'replace').splitlines()
stderr_lines = (stderr or '<empty>').decode('utf8', 'replace').splitlines()
return (stderr, 'crash log for %s (pid %s):\n%s\n%s\n' % (name_str, pid_str,
'\n'.join(('STDOUT: ' + l) for l in stdout_lines),
'\n'.join(('STDERR: ' + l) for l in stderr_lines)))
def look_for_new_crash_logs(self, crashed_processes, start_time):
pass
def look_for_new_samples(self, unresponsive_processes, start_time):
pass
def sample_process(self, name, pid):
pass
def virtual_test_suites(self):
return []
def find_system_pid(self, name, pid):
# This is only overridden on Windows
return pid
@memoized
def populated_virtual_test_suites(self):
suites = self.virtual_test_suites()
# Sanity-check the suites to make sure they don't point to other suites.
suite_dirs = [suite.name for suite in suites]
for suite in suites:
assert suite.base not in suite_dirs
for suite in suites:
base_tests = self._real_tests([suite.base])
suite.tests = {}
for test in base_tests:
suite.tests[test.replace(suite.base, suite.name, 1)] = test
return suites
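    # Illustrative sketch (hypothetical suite): a VirtualTestSuite with name
    # 'virtual/softwarecompositing' and base 'compositing' maps the real test
    # 'compositing/plane.html' to 'virtual/softwarecompositing/plane.html'
    # in suite.tests.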
def _virtual_tests(self, paths, suites):
virtual_tests = list()
for suite in suites:
if paths:
for test in suite.tests:
if any(test.startswith(p) for p in paths):
virtual_tests.append(test)
else:
virtual_tests.extend(suite.tests.keys())
return virtual_tests
def lookup_virtual_test_base(self, test_name):
for suite in self.populated_virtual_test_suites():
if test_name.startswith(suite.name):
return test_name.replace(suite.name, suite.base, 1)
return None
def lookup_virtual_test_args(self, test_name):
for suite in self.populated_virtual_test_suites():
if test_name.startswith(suite.name):
return suite.args
return []
def should_run_as_pixel_test(self, test_input):
if not self._options.pixel_tests:
return False
if self._options.pixel_test_directories:
return any(test_input.test_name.startswith(directory) for directory in self._options.pixel_test_directories)
return self._should_run_as_pixel_test(test_input)
def _should_run_as_pixel_test(self, test_input):
        # Default behavior is to allow all tests to run as pixel tests if --pixel-tests is on and
# --pixel-test-directory is not specified.
return True
    # FIXME: Eventually we should standardize port naming, and make this method smart enough
# to use for all port configurations (including architectures, graphics types, etc).
def _port_flag_for_scripts(self):
        # This is overridden by ports which need a flag passed to scripts to distinguish the use of that port.
# For example --qt on linux, since a user might have both Gtk and Qt libraries installed.
return None
def tooling_flag(self):
return "--port=%s%s" % (self.port_name, '-wk2' if self.get_option('webkit_test_runner') else '')
# This is modeled after webkitdirs.pm argumentsForConfiguration() from old-run-webkit-tests
def _arguments_for_configuration(self):
config_args = []
config_args.append(self._config.flag_for_configuration(self.get_option('configuration')))
# FIXME: We may need to add support for passing --32-bit like old-run-webkit-tests had.
port_flag = self._port_flag_for_scripts()
if port_flag:
config_args.append(port_flag)
return config_args
def _run_script(self, script_name, args=None, include_configuration_arguments=True, decode_output=True, env=None):
run_script_command = [self.path_to_script(script_name)]
if include_configuration_arguments:
run_script_command.extend(self._arguments_for_configuration())
if args:
run_script_command.extend(args)
output = self._executive.run_command(run_script_command, cwd=self.webkit_base(), decode_output=decode_output, env=env)
_log.debug('Output of %s:\n%s' % (run_script_command, output))
return output
def _build_driver(self):
environment = self.host.copy_current_environment()
environment.disable_gcc_smartquotes()
env = environment.to_dictionary()
# FIXME: We build both DumpRenderTree and WebKitTestRunner for
# WebKitTestRunner runs because DumpRenderTree still includes
# the DumpRenderTreeSupport module and the TestNetscapePlugin.
# These two projects should be factored out into their own
# projects.
try:
self._run_script("build-dumprendertree", args=self._build_driver_flags(), env=env)
if self.get_option('webkit_test_runner'):
self._run_script("build-webkittestrunner", args=self._build_driver_flags(), env=env)
except ScriptError, e:
_log.error(e.message_with_output(output_limit=None))
return False
return True
def _build_driver_flags(self):
return []
def test_search_path(self):
return self.baseline_search_path()
def _tests_for_other_platforms(self):
# By default we will skip any directory under LayoutTests/platform
# that isn't in our baseline search path (this mirrors what
# old-run-webkit-tests does in findTestsToRun()).
# Note this returns LayoutTests/platform/*, not platform/*/*.
entries = self._filesystem.glob(self._webkit_baseline_path('*'))
dirs_to_skip = []
for entry in entries:
if self._filesystem.isdir(entry) and entry not in self.test_search_path():
basename = self._filesystem.basename(entry)
dirs_to_skip.append('platform/%s' % basename)
return dirs_to_skip
def _runtime_feature_list(self):
"""If a port makes certain features available only through runtime flags, it can override this routine to indicate which ones are available."""
return None
def nm_command(self):
return 'nm'
def _modules_to_search_for_symbols(self):
path = self._path_to_webcore_library()
if path:
return [path]
return []
def _symbols_string(self):
symbols = ''
for path_to_module in self._modules_to_search_for_symbols():
try:
symbols += self._executive.run_command([self.nm_command(), path_to_module], error_handler=self._executive.ignore_error)
except OSError, e:
_log.warn("Failed to run nm: %s. Can't determine supported features correctly." % e)
return symbols
# Ports which use run-time feature detection should define this method and return
    # a dictionary mapping from Feature Names to skipped directories. NRWT will
# run DumpRenderTree --print-supported-features and parse the output.
# If the Feature Names are not found in the output, the corresponding directories
# will be skipped.
def _missing_feature_to_skipped_tests(self):
"""Return the supported feature dictionary. Keys are feature names and values
are the lists of directories to skip if the feature name is not matched."""
# FIXME: This list matches WebKitWin and should be moved onto the Win port.
return {
"Accelerated Compositing": ["compositing"],
"3D Rendering": ["animations/3d", "transforms/3d"],
}
def _has_test_in_directories(self, directory_lists, test_list):
if not test_list:
return False
directories = itertools.chain.from_iterable(directory_lists)
for directory, test in itertools.product(directories, test_list):
if test.startswith(directory):
return True
return False
def _skipped_tests_for_unsupported_features(self, test_list):
        # Only check the runtime feature list if there are tests in the test_list that might get skipped.
# This is a performance optimization to avoid the subprocess call to DRT.
# If the port supports runtime feature detection, disable any tests
# for features missing from the runtime feature list.
# If _runtime_feature_list returns a non-None value, then prefer
# runtime feature detection over static feature detection.
if self._has_test_in_directories(self._missing_feature_to_skipped_tests().values(), test_list):
supported_feature_list = self._runtime_feature_list()
if supported_feature_list is not None:
return reduce(operator.add, [directories for feature, directories in self._missing_feature_to_skipped_tests().items() if feature not in supported_feature_list])
return []
def _wk2_port_name(self):
# By current convention, the WebKit2 name is always mac-wk2, win-wk2, not mac-leopard-wk2, etc,
# except for Qt because WebKit2 is only supported by Qt 5.0 (therefore: qt-5.0-wk2).
return "%s-wk2" % self.port_name
# We might need to pass scm into this function for scm.checkout_root
@staticmethod
def script_shell_command(script_name):
script_path = os.path.join("Tools", "Scripts", script_name)
return Executive.shell_command_for_script(script_path)
def make_args(self):
args = "--makeargs=\"-j%s\"" % self._executive.cpu_count()
if "MAKEFLAGS" in os.environ:
args = "--makeargs=\"%s\"" % os.environ["MAKEFLAGS"]
return args
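    # Illustrative sketch: on an 8-core machine with MAKEFLAGS unset this returns
    # '--makeargs="-j8"'; when MAKEFLAGS is set, its value is passed through verbatim.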
def update_webkit_command(self, non_interactive=False):
return self.script_shell_command("update-webkit")
def check_webkit_style_command(self):
return self.script_shell_command("check-webkit-style")
def prepare_changelog_command(self):
return self.script_shell_command("prepare-ChangeLog")
def build_webkit_command(self, build_style=None):
command = self.script_shell_command("build-webkit")
if build_style == "debug":
command.append("--debug")
if build_style == "release":
command.append("--release")
return command
def run_javascriptcore_tests_command(self):
return self.script_shell_command("run-javascriptcore-tests")
def run_webkit_unit_tests_command(self):
return None
def run_webkit_tests_command(self):
return self.script_shell_command("run-webkit-tests")
def run_python_unittests_command(self):
return self.script_shell_command("test-webkitpy")
def run_perl_unittests_command(self):
return self.script_shell_command("test-webkitperl")
def run_bindings_tests_command(self):
return self.script_shell_command("run-bindings-tests")
class VirtualTestSuite(object):
def __init__(self, name, base, args, tests=None):
self.name = name
self.base = base
self.args = args
self.tests = tests or set()
def __repr__(self):
return "VirtualTestSuite('%s', '%s', %s)" % (self.name, self.base, self.args)
| klim-iv/phantomjs-qt5 | src/webkit/Tools/Scripts/webkitpy/port/base.py | Python | bsd-3-clause | 71,188 |
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example pauses an ad."""
import argparse
import sys
from google.api_core import protobuf_helpers
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
def main(client, customer_id, ad_group_id, ad_id):
ad_group_ad_service = client.get_service("AdGroupAdService")
ad_group_ad_operation = client.get_type("AdGroupAdOperation")
ad_group_ad = ad_group_ad_operation.update
ad_group_ad.resource_name = ad_group_ad_service.ad_group_ad_path(
customer_id, ad_group_id, ad_id
)
    ad_group_ad.status = client.enums.AdGroupAdStatusEnum.PAUSED
client.copy_from(
ad_group_ad_operation.update_mask,
protobuf_helpers.field_mask(None, ad_group_ad._pb),
)
ad_group_ad_response = ad_group_ad_service.mutate_ad_group_ads(
customer_id=customer_id, operations=[ad_group_ad_operation]
)
print(
f"Paused ad group ad {ad_group_ad_response.results[0].resource_name}."
)
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
googleads_client = GoogleAdsClient.load_from_storage(version="v10")
parser = argparse.ArgumentParser(
description=("Pauses an ad in the specified customer's ad group.")
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
parser.add_argument(
"-a", "--ad_group_id", type=str, required=True, help="The ad group ID."
)
parser.add_argument(
"-i", "--ad_id", type=str, required=True, help="The ad ID."
)
args = parser.parse_args()
try:
main(googleads_client, args.customer_id, args.ad_group_id, args.ad_id)
except GoogleAdsException as ex:
print(
f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}" and includes the following errors:'
)
for error in ex.failure.errors:
print(f'\tError with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
| googleads/google-ads-python | examples/basic_operations/pause_ad.py | Python | apache-2.0 | 3,013 |
from Cython.Compiler import Options
from Cython.Compiler import PyrexTypes
from Cython.Compiler.Visitor import CythonTransform
from Cython.Compiler.StringEncoding import EncodedString
from Cython.Compiler.AutoDocTransforms import (
ExpressionWriter as BaseExpressionWriter,
AnnotationWriter as BaseAnnotationWriter,
)
class ExpressionWriter(BaseExpressionWriter):
def visit_UnicodeNode(self, node):
self.emit_string(node)
class AnnotationWriter(ExpressionWriter, BaseAnnotationWriter):
pass
class EmbedSignature(CythonTransform):
def __init__(self, context):
super(EmbedSignature, self).__init__(context)
self.class_name = None
self.class_node = None
def _fmt_expr(self, node):
writer = ExpressionWriter()
result = writer.write(node)
# print(type(node).__name__, '-->', result)
return result
def _fmt_annotation(self, node):
writer = AnnotationWriter()
result = writer.write(node)
# print(type(node).__name__, '-->', result)
return result
def _fmt_arg(self, arg):
annotation = None
if arg.is_self_arg:
doc = arg.name # clinic: '$self'
elif arg.is_type_arg:
doc = arg.name # clinic: '$type'
else:
doc = arg.name
if arg.type is PyrexTypes.py_object_type:
annotation = None # XXX use 'Any' ?
else:
annotation = arg.type.declaration_code('', for_display=1)
#if arg.default and arg.default.is_none:
# annotation = 'Optional[%s]' % annotation
if arg.annotation:
annotation = self._fmt_annotation(arg.annotation)
if annotation:
doc = doc + (': %s' % annotation)
if arg.default:
default = self._fmt_expr(arg.default)
doc = doc + (' = %s' % default)
elif arg.default:
default = self._fmt_expr(arg.default)
doc = doc + ('=%s' % default)
return doc
def _fmt_star_arg(self, arg):
arg_doc = arg.name
if arg.annotation:
annotation = self._fmt_annotation(arg.annotation)
arg_doc = arg_doc + (': %s' % annotation)
return arg_doc
def _fmt_arglist(self, args,
npoargs=0, npargs=0, pargs=None,
nkargs=0, kargs=None,
hide_self=False):
arglist = []
for arg in args:
if not hide_self or not arg.entry.is_self_arg:
arg_doc = self._fmt_arg(arg)
arglist.append(arg_doc)
if pargs:
arg_doc = self._fmt_star_arg(pargs)
arglist.insert(npargs + npoargs, '*%s' % arg_doc)
elif nkargs:
arglist.insert(npargs + npoargs, '*')
if npoargs:
arglist.insert(npoargs, '/')
if kargs:
arg_doc = self._fmt_star_arg(kargs)
arglist.append('**%s' % arg_doc)
return arglist
def _fmt_ret_type(self, ret):
if ret is PyrexTypes.py_object_type:
return None
else:
return ret.declaration_code("", for_display=1)
def _fmt_signature(self, cls_name, func_name, args,
npoargs=0, npargs=0, pargs=None,
nkargs=0, kargs=None,
return_expr=None,
return_type=None, hide_self=False):
arglist = self._fmt_arglist(args,
npoargs, npargs, pargs,
nkargs, kargs,
hide_self=hide_self)
arglist_doc = ', '.join(arglist)
func_doc = '%s(%s)' % (func_name, arglist_doc)
if cls_name:
func_doc = '%s.%s' % (cls_name, func_doc)
ret_doc = None
if return_expr:
ret_doc = self._fmt_annotation(return_expr)
elif return_type:
ret_doc = self._fmt_ret_type(return_type)
if ret_doc:
docfmt = '%s -> %s' # clinic: '%s -> (%s)'
func_doc = docfmt % (func_doc, ret_doc)
return func_doc
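    # Illustrative sketch (hypothetical node): for 'def scale(self, x: int, y=1)'
    # defined on class 'Vec', this yields 'Vec.scale(self, x: int, y=1)', with
    # ' -> <type>' appended when a return annotation or C return type is known.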
def _embed_signature(self, signature, node_doc):
if node_doc:
docfmt = "%s\n%s" # clinic: "%s\n--\n\n%s
return docfmt % (signature, node_doc)
else:
return signature
def __call__(self, node):
if not Options.docstrings:
return node
else:
return super(EmbedSignature, self).__call__(node)
def visit_ClassDefNode(self, node):
oldname = self.class_name
oldclass = self.class_node
self.class_node = node
try:
# PyClassDefNode
self.class_name = node.name
except AttributeError:
# CClassDefNode
self.class_name = node.class_name
self.visitchildren(node)
self.class_name = oldname
self.class_node = oldclass
return node
def visit_LambdaNode(self, node):
        # lambda expressions do not have signatures or inner functions
return node
def visit_DefNode(self, node):
if not self.current_directives['embedsignature']:
return node
is_constructor = False
hide_self = False
if node.entry.is_special:
is_constructor = self.class_node and node.name == '__init__'
if not is_constructor:
return node
class_name, func_name = None, self.class_name
hide_self = True
else:
class_name, func_name = self.class_name, node.name
npoargs = getattr(node, 'num_posonly_args', 0)
nkargs = getattr(node, 'num_kwonly_args', 0)
npargs = len(node.args) - nkargs - npoargs
signature = self._fmt_signature(
class_name, func_name, node.args,
npoargs, npargs, node.star_arg,
nkargs, node.starstar_arg,
return_expr=node.return_type_annotation,
return_type=None, hide_self=hide_self)
if signature:
if is_constructor:
doc_holder = self.class_node.entry.type.scope
else:
doc_holder = node.entry
if doc_holder.doc is not None:
old_doc = doc_holder.doc
elif not is_constructor and getattr(node, 'py_func', None) is not None:
old_doc = node.py_func.entry.doc
else:
old_doc = None
new_doc = self._embed_signature(signature, old_doc)
doc_holder.doc = EncodedString(new_doc)
if not is_constructor and getattr(node, 'py_func', None) is not None:
node.py_func.entry.doc = EncodedString(new_doc)
return node
def visit_CFuncDefNode(self, node):
if not self.current_directives['embedsignature']:
return node
if not node.overridable: # not cpdef FOO(...):
return node
signature = self._fmt_signature(
self.class_name, node.declarator.base.name,
node.declarator.args,
return_type=node.return_type)
if signature:
if node.entry.doc is not None:
old_doc = node.entry.doc
elif getattr(node, 'py_func', None) is not None:
old_doc = node.py_func.entry.doc
else:
old_doc = None
new_doc = self._embed_signature(signature, old_doc)
node.entry.doc = EncodedString(new_doc)
py_func = getattr(node, 'py_func', None)
if py_func is not None:
py_func.entry.doc = EncodedString(new_doc)
return node
def visit_PropertyNode(self, node):
if not self.current_directives['embedsignature']:
return node
entry = node.entry
body = node.body
prop_name = entry.name
type_name = None
if entry.visibility == 'public':
# property synthesised from a cdef public attribute
type_name = entry.type.declaration_code("", for_display=1)
if not entry.type.is_pyobject:
type_name = "'%s'" % type_name
elif entry.type.is_extension_type:
type_name = entry.type.module_name + '.' + type_name
if type_name is None:
for stat in body.stats:
if stat.name != '__get__':
continue
cls_name = self.class_name
if cls_name:
prop_name = '%s.%s' % (cls_name, prop_name)
ret_annotation = stat.return_type_annotation
if ret_annotation:
type_name = self._fmt_annotation(ret_annotation)
if type_name is not None:
signature = '%s: %s' % (prop_name, type_name)
new_doc = self._embed_signature(signature, entry.doc)
entry.doc = EncodedString(new_doc)
return node
# Monkeypatch EmbedSignature transform
from Cython.Compiler import AutoDocTransforms
AutoDocTransforms.EmbedSignature = EmbedSignature
| mpi4py/mpi4py | conf/cyautodoc.py | Python | bsd-2-clause | 9,170 |
#!/usr/bin/python
import sys
import re
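# Illustrative sketch (hypothetical stream): an input line of 'Hello WORLD'
# emits 'Lower 1' for 'Hello' and 'Capital 1' for 'WORLD'.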
for line in sys.stdin:
    # Extract the alphabetic words from the line
for word in re.findall(r'[a-zA-Z]+', line):
if word.isupper():
print("Capital 1")
else:
print("Lower 1") | JasonSanchez/w261 | week1/mapper.py | Python | mit | 268 |
"""Config flow for Somfy MyLink integration."""
import asyncio
from copy import deepcopy
import logging
from somfy_mylink_synergy import SomfyMyLinkSynergy
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.components.dhcp import HOSTNAME, IP_ADDRESS, MAC_ADDRESS
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import callback
from homeassistant.helpers.device_registry import format_mac
from .const import (
CONF_REVERSE,
CONF_REVERSED_TARGET_IDS,
CONF_SYSTEM_ID,
CONF_TARGET_ID,
CONF_TARGET_NAME,
DEFAULT_PORT,
DOMAIN,
MYLINK_STATUS,
)
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from schema with values provided by the user.
"""
somfy_mylink = SomfyMyLinkSynergy(
data[CONF_SYSTEM_ID], data[CONF_HOST], data[CONF_PORT]
)
try:
status_info = await somfy_mylink.status_info()
except asyncio.TimeoutError as ex:
raise CannotConnect from ex
if not status_info or "error" in status_info:
_LOGGER.debug("Auth error: %s", status_info)
raise InvalidAuth
return {"title": f"MyLink {data[CONF_HOST]}"}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Somfy MyLink."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_ASSUMED
def __init__(self):
"""Initialize the somfy_mylink flow."""
self.host = None
self.mac = None
self.ip_address = None
async def async_step_dhcp(self, dhcp_discovery):
"""Handle dhcp discovery."""
if self._host_already_configured(dhcp_discovery[IP_ADDRESS]):
return self.async_abort(reason="already_configured")
formatted_mac = format_mac(dhcp_discovery[MAC_ADDRESS])
        await self.async_set_unique_id(formatted_mac)
self._abort_if_unique_id_configured(
updates={CONF_HOST: dhcp_discovery[IP_ADDRESS]}
)
self.host = dhcp_discovery[HOSTNAME]
self.mac = formatted_mac
self.ip_address = dhcp_discovery[IP_ADDRESS]
self.context["title_placeholders"] = {"ip": self.ip_address, "mac": self.mac}
return await self.async_step_user()
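    # Illustrative sketch (hypothetical discovery payload): dhcp_discovery looks like
    # {IP_ADDRESS: '192.168.1.42', MAC_ADDRESS: 'AA:11:BB:22:CC:33', HOSTNAME: 'somfy_eeff'};
    # the MAC is normalized via format_mac() and used as the entry's unique ID.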
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
if self._host_already_configured(user_input[CONF_HOST]):
return self.async_abort(reason="already_configured")
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
return self.async_create_entry(title=info["title"], data=user_input)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_HOST, default=self.ip_address): str,
vol.Required(CONF_SYSTEM_ID): str,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): int,
}
),
errors=errors,
)
async def async_step_import(self, user_input):
"""Handle import."""
if self._host_already_configured(user_input[CONF_HOST]):
return self.async_abort(reason="already_configured")
return await self.async_step_user(user_input)
def _host_already_configured(self, host):
"""See if we already have an entry matching the host."""
for entry in self._async_current_entries():
if entry.data.get(CONF_HOST) == host:
return True
return False
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for somfy_mylink."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
self.options = deepcopy(dict(config_entry.options))
self._target_id = None
@callback
def _async_callback_targets(self):
"""Return the list of targets."""
return self.hass.data[DOMAIN][self.config_entry.entry_id][MYLINK_STATUS][
"result"
]
@callback
def _async_get_target_name(self, target_id) -> str:
"""Find the name of a target in the api data."""
mylink_targets = self._async_callback_targets()
for cover in mylink_targets:
if cover["targetID"] == target_id:
return cover["name"]
raise KeyError
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if self.config_entry.state != config_entries.ENTRY_STATE_LOADED:
_LOGGER.error("MyLink must be connected to manage device options")
return self.async_abort(reason="cannot_connect")
if user_input is not None:
target_id = user_input.get(CONF_TARGET_ID)
if target_id:
return await self.async_step_target_config(None, target_id)
return self.async_create_entry(title="", data=self.options)
cover_dict = {None: None}
mylink_targets = self._async_callback_targets()
if mylink_targets:
for cover in mylink_targets:
cover_dict[cover["targetID"]] = cover["name"]
data_schema = vol.Schema({vol.Optional(CONF_TARGET_ID): vol.In(cover_dict)})
return self.async_show_form(step_id="init", data_schema=data_schema, errors={})
async def async_step_target_config(self, user_input=None, target_id=None):
"""Handle options flow for target."""
reversed_target_ids = self.options.setdefault(CONF_REVERSED_TARGET_IDS, {})
if user_input is not None:
if user_input[CONF_REVERSE] != reversed_target_ids.get(self._target_id):
reversed_target_ids[self._target_id] = user_input[CONF_REVERSE]
return await self.async_step_init()
self._target_id = target_id
return self.async_show_form(
step_id="target_config",
data_schema=vol.Schema(
{
vol.Optional(
CONF_REVERSE,
default=reversed_target_ids.get(target_id, False),
): bool
}
),
description_placeholders={
CONF_TARGET_NAME: self._async_get_target_name(target_id),
},
errors={},
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
| w1ll1am23/home-assistant | homeassistant/components/somfy_mylink/config_flow.py | Python | apache-2.0 | 7,332 |
from django.contrib import admin
# Register your models here.
from products.models import Product
admin.site.register(Product) | katchengli/IntlWDHackathon | products/admin.py | Python | mit | 128 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid.layers as layers
import paddle.fluid as fluid
from paddle.fluid.layers.control_flow import StaticRNN as PaddingRNN
import numpy as np
def lm_model(hidden_size,
vocab_size,
batch_size,
num_layers=2,
num_steps=20,
init_scale=0.1,
dropout=None,
rnn_model='static'):
def padding_rnn(input_embedding, len=3, init_hidden=None, init_cell=None):
weight_1_arr = []
weight_2_arr = []
bias_arr = []
hidden_array = []
cell_array = []
mask_array = []
for i in range(num_layers):
weight_1 = layers.create_parameter([hidden_size * 2, hidden_size*4], dtype="float32", name="fc_weight1_"+str(i), \
default_initializer=fluid.initializer.UniformInitializer(low=-init_scale, high=init_scale))
weight_1_arr.append(weight_1)
bias_1 = layers.create_parameter(
[hidden_size * 4],
dtype="float32",
name="fc_bias1_" + str(i),
default_initializer=fluid.initializer.Constant(0.0))
bias_arr.append(bias_1)
pre_hidden = layers.slice(
init_hidden, axes=[0], starts=[i], ends=[i + 1])
pre_cell = layers.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1])
pre_hidden = layers.reshape(pre_hidden, shape=[-1, hidden_size])
pre_cell = layers.reshape(pre_cell, shape=[-1, hidden_size])
hidden_array.append(pre_hidden)
cell_array.append(pre_cell)
input_embedding = layers.transpose(input_embedding, perm=[1, 0, 2])
rnn = PaddingRNN()
with rnn.step():
input = rnn.step_input(input_embedding)
for k in range(num_layers):
pre_hidden = rnn.memory(init=hidden_array[k])
pre_cell = rnn.memory(init=cell_array[k])
weight_1 = weight_1_arr[k]
bias = bias_arr[k]
nn = layers.concat([input, pre_hidden], 1)
gate_input = layers.matmul(x=nn, y=weight_1)
gate_input = layers.elementwise_add(gate_input, bias)
#i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)
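                # Slice the fused projection into the four LSTM gates by
                # offset (equivalent to the split commented out above).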
i = layers.slice(
gate_input, axes=[1], starts=[0], ends=[hidden_size])
j = layers.slice(
gate_input,
axes=[1],
starts=[hidden_size],
ends=[hidden_size * 2])
f = layers.slice(
gate_input,
axes=[1],
starts=[hidden_size * 2],
ends=[hidden_size * 3])
o = layers.slice(
gate_input,
axes=[1],
starts=[hidden_size * 3],
ends=[hidden_size * 4])
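                # Standard LSTM update: c_t = sigmoid(f) * c_{t-1}
                # + sigmoid(i) * tanh(j); hidden m_t = tanh(c_t) * sigmoid(o).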
c = pre_cell * layers.sigmoid(f) + layers.sigmoid(
i) * layers.tanh(j)
m = layers.tanh(c) * layers.sigmoid(o)
rnn.update_memory(pre_hidden, m)
rnn.update_memory(pre_cell, c)
rnn.step_output(m)
rnn.step_output(c)
input = m
                if dropout is not None and dropout > 0.0:
input = layers.dropout(
input,
dropout_prob=dropout,
dropout_implementation='upscale_in_train')
rnn.step_output(input)
#real_res = layers.concat(res, 0)
rnnout = rnn()
last_hidden_array = []
last_cell_array = []
real_res = rnnout[-1]
for i in range(num_layers):
m = rnnout[i * 2]
c = rnnout[i * 2 + 1]
m.stop_gradient = True
c.stop_gradient = True
last_h = layers.slice(
m, axes=[0], starts=[num_steps - 1], ends=[num_steps])
last_hidden_array.append(last_h)
last_c = layers.slice(
c, axes=[0], starts=[num_steps - 1], ends=[num_steps])
last_cell_array.append(last_c)
'''
else:
real_res = rnnout[-1]
for i in range( num_layers ):
m1, c1, m2, c2 = rnnout
real_res = m2
m1.stop_gradient = True
c1.stop_gradient = True
c2.stop_gradient = True
'''
#layers.Print( first_hidden, message="22", summarize=10)
#layers.Print( rnnout[1], message="11", summarize=10)
#real_res = ( rnnout[1] + rnnout[2] + rnnout[3] + rnnout[4]) / 4.0
real_res = layers.transpose(x=real_res, perm=[1, 0, 2])
last_hidden = layers.concat(last_hidden_array, 0)
last_cell = layers.concat(last_cell_array, 0)
'''
last_hidden = layers.concat( hidden_array, 1 )
last_hidden = layers.reshape( last_hidden, shape=[-1, num_layers, hidden_size])
last_hidden = layers.transpose( x = last_hidden, perm = [1, 0, 2])
last_cell = layers.concat( cell_array, 1)
last_cell = layers.reshape( last_cell, shape=[ -1, num_layers, hidden_size])
last_cell = layers.transpose( x = last_cell, perm = [1, 0, 2])
'''
return real_res, last_hidden, last_cell
def encoder_static(input_embedding, len=3, init_hidden=None,
init_cell=None):
weight_1_arr = []
weight_2_arr = []
bias_arr = []
hidden_array = []
cell_array = []
mask_array = []
for i in range(num_layers):
weight_1 = layers.create_parameter([hidden_size * 2, hidden_size*4], dtype="float32", name="fc_weight1_"+str(i), \
default_initializer=fluid.initializer.UniformInitializer(low=-init_scale, high=init_scale))
weight_1_arr.append(weight_1)
bias_1 = layers.create_parameter(
[hidden_size * 4],
dtype="float32",
name="fc_bias1_" + str(i),
default_initializer=fluid.initializer.Constant(0.0))
bias_arr.append(bias_1)
pre_hidden = layers.slice(
init_hidden, axes=[0], starts=[i], ends=[i + 1])
pre_cell = layers.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1])
pre_hidden = layers.reshape(pre_hidden, shape=[-1, hidden_size])
pre_cell = layers.reshape(pre_cell, shape=[-1, hidden_size])
hidden_array.append(pre_hidden)
cell_array.append(pre_cell)
res = []
for index in range(len):
input = layers.slice(
input_embedding, axes=[1], starts=[index], ends=[index + 1])
input = layers.reshape(input, shape=[-1, hidden_size])
for k in range(num_layers):
pre_hidden = hidden_array[k]
pre_cell = cell_array[k]
weight_1 = weight_1_arr[k]
bias = bias_arr[k]
nn = layers.concat([input, pre_hidden], 1)
gate_input = layers.matmul(x=nn, y=weight_1)
gate_input = layers.elementwise_add(gate_input, bias)
i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)
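                # Same fused-gate arithmetic as padding_rnn, but the time loop
                # is unrolled statically in Python rather than via StaticRNN.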
c = pre_cell * layers.sigmoid(f) + layers.sigmoid(
i) * layers.tanh(j)
m = layers.tanh(c) * layers.sigmoid(o)
hidden_array[k] = m
cell_array[k] = c
input = m
                if dropout is not None and dropout > 0.0:
input = layers.dropout(
input,
dropout_prob=dropout,
dropout_implementation='upscale_in_train')
res.append(layers.reshape(input, shape=[1, -1, hidden_size]))
real_res = layers.concat(res, 0)
real_res = layers.transpose(x=real_res, perm=[1, 0, 2])
last_hidden = layers.concat(hidden_array, 1)
last_hidden = layers.reshape(
last_hidden, shape=[-1, num_layers, hidden_size])
last_hidden = layers.transpose(x=last_hidden, perm=[1, 0, 2])
last_cell = layers.concat(cell_array, 1)
last_cell = layers.reshape(
last_cell, shape=[-1, num_layers, hidden_size])
last_cell = layers.transpose(x=last_cell, perm=[1, 0, 2])
return real_res, last_hidden, last_cell
x = layers.data(name="x", shape=[-1, 1, 1], dtype='int64')
y = layers.data(name="y", shape=[-1, 1], dtype='float32')
init_hidden = layers.data(name="init_hidden", shape=[1], dtype='float32')
init_cell = layers.data(name="init_cell", shape=[1], dtype='float32')
init_hidden = layers.reshape(
init_hidden, shape=[num_layers, -1, hidden_size])
init_cell = layers.reshape(init_cell, shape=[num_layers, -1, hidden_size])
x_emb = layers.embedding(
input=x,
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False,
param_attr=fluid.ParamAttr(
name='embedding_para',
initializer=fluid.initializer.UniformInitializer(
low=-init_scale, high=init_scale)))
x_emb = layers.reshape(x_emb, shape=[-1, num_steps, hidden_size])
    if dropout is not None and dropout > 0.0:
x_emb = layers.dropout(
x_emb,
dropout_prob=dropout,
dropout_implementation='upscale_in_train')
if rnn_model == "padding":
rnn_out, last_hidden, last_cell = padding_rnn(
x_emb, len=num_steps, init_hidden=init_hidden, init_cell=init_cell)
elif rnn_model == "static":
rnn_out, last_hidden, last_cell = encoder_static(
x_emb, len=num_steps, init_hidden=init_hidden, init_cell=init_cell)
elif rnn_model == "cudnn":
x_emb = layers.transpose( x_emb, perm=[1, 0, 2])
rnn_out, last_hidden, last_cell = layers.lstm( x_emb, init_hidden, init_cell, num_steps, hidden_size, num_layers, \
is_bidirec=False, \
default_initializer=fluid.initializer.UniformInitializer(low=-init_scale, high=init_scale) )
rnn_out = layers.transpose( rnn_out, perm=[1, 0, 2])
else:
print( "type not support")
return
rnn_out = layers.reshape(rnn_out, shape=[-1, num_steps, hidden_size])
softmax_weight = layers.create_parameter([hidden_size, vocab_size], dtype="float32", name="softmax_weight", \
default_initializer=fluid.initializer.UniformInitializer(low=-init_scale, high=init_scale))
softmax_bias = layers.create_parameter([vocab_size], dtype="float32", name='softmax_bias', \
default_initializer=fluid.initializer.UniformInitializer(low=-init_scale, high=init_scale))
projection = layers.matmul(rnn_out, softmax_weight)
projection = layers.elementwise_add(projection, softmax_bias)
projection = layers.reshape(projection, shape=[-1, vocab_size])
#y = layers.reshape( y, shape=[-1, vocab_size])
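    # Per-token cross entropy: average over the batch (dim 0), then sum over
    # the num_steps time dimension to obtain the sequence loss.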
loss = layers.softmax_with_cross_entropy(
logits=projection, label=y, soft_label=False)
loss = layers.reshape(loss, shape=[-1, num_steps])
loss = layers.reduce_mean(loss, dim=[0])
loss = layers.reduce_sum(loss)
loss.permissions = True
feeding_list = ['x', 'y', 'init_hidden', 'init_cell']
return loss, last_hidden, last_cell, feeding_list
| kuke/models | fluid/PaddleNLP/language_model/lstm/lm_model.py | Python | apache-2.0 | 12,234 |
from random import shuffle
import sys
import argparse
import re
parser = argparse.ArgumentParser(description="Colorize stdin; (optionally) add a tag.")
parser.add_argument("--tag", type=str, help="Optional tag")
parser.add_argument("--no-color", action="store_true", help="Flag to force the output to be no color.")
args = parser.parse_args()
tag = "[{}]".format(args.tag) if args.tag else ""
ALL_COLORS = [
"\u001b[30m", # Black
"\u001b[31m", # Red
"\u001b[32m", # Green
"\u001b[33m", # Yellow
"\u001b[34m", # Blue
"\u001b[35m", # Magenta
"\u001b[36m", # Cyan
]
RESET = "\u001b[0m"
shuffle(ALL_COLORS)
COLOR = ALL_COLORS[0]
if args.no_color:
COLOR = ""
RESET = ""
# https://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python
ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
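# Strip any ANSI escape codes already present so the whole line renders in the
# single color chosen above.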
for line in sys.stdin:
print(
"{begin_color} {tag} {line} {end_color}".format(
begin_color=COLOR,
tag=tag,
line=ansi_escape.sub('', line.strip()),
end_color=RESET, # Reset
)
)
sys.stdout.flush()
| ucbrise/clipper | bin/colorize_output.py | Python | apache-2.0 | 1,153 |
#!/usr/bin/python3
import pygame
import sys
import math
# own modules
#import pygame.math.Vector2
#import pygame.math.Vector3 as Vec3d
#import pygame.math.Vector2 as Vec2d
from Vec2d import Vec2d
from Vec3d import Vec3d
class Circle(object):
"""a Circle in 3D Space"""
def __init__(self, surface, color, center=Vec3d(0, 0, 0), size=Vec3d(1, 1, 1), steps=36.0, viewer_distance=256, fov=2):
self.surface = surface
self.color = color
self.size = size
self.center = center
self.steps = steps
self.viewer_distance = viewer_distance
self.fov = fov
# class variables
self.circle_raw_points = []
self.circle = None
self.transformed_circle = None
# generate master circle radius 1.0 no rotation
self.generate()
self.resize_and_center()
def generate(self):
"""generate master circle"""
radius = 1.0
# draw circle in x-z plane at y=0
y = 0.0
for angle in self.step_generator(0, 2 * math.pi, self.steps):
x = math.cos(angle) * radius
z = math.sin(angle) * radius
self.circle_raw_points.append(Vec3d(x, y, z))
def resize_and_center(self):
"""recenter and resize master circle"""
self.circle = []
for point in self.circle_raw_points:
self.circle.append(point * self.size + self.center)
def set_color(self, color):
"""setter for self.color"""
self.color = color
def set_size(self, size=Vec3d(1, 1, 1)):
"""sets size and resizes circle"""
self.size = size
self.resize_and_center()
def set_center(self, center=Vec3d(0, 0, 0)):
"""sets center and recenters circle"""
self.center = center
self.resize_and_center()
def rotate(self, dx, dy, dz, offset2d=Vec2d(0, 0)):
"""rotates circle points and generates tranformed point array in 2d"""
# rotate and project every point to 2d
self.transformed_circle = []
for point in self.circle:
new_point = point.rotated_around_x(dx).rotated_around_y(dy).rotated_around_z(dz)
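            # Perspective-project the rotated 3-D point into 2-D screen space.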
transformed = new_point.project(self.surface.get_width(), self.surface.get_height(), self.viewer_distance, self.fov)
self.transformed_circle.append((transformed.x + offset2d.x, transformed.y + offset2d.y))
def update(self, viewer_distance, fov):
"""drawing"""
self.viewer_distance = viewer_distance
self.fov = fov
pygame.draw.polygon(self.surface, self.color, self.transformed_circle, 1)
@staticmethod
def step_generator(start, stop, steps):
"""yields steps values between start and stop for float values"""
distance = stop - start
step = float(distance) / float(steps)
value = float(start)
while value <= stop:
yield value
value += step
def test():
"""test"""
try:
fps = 50
surface = pygame.display.set_mode((600, 600))
pygame.init()
theta = 1
step = math.pi / 180
spheres = (
Circle(surface, (100, 0, 0), Vec3d(-1.5, -1.5, 1.5), Vec3d(1, 1, 1)),
)
clock = pygame.time.Clock()
# for 3d projection
fov = 1
viewer_distance = 256
pause = False
color = pygame.Color(255, 255, 255, 255)
while True:
clock.tick(fps)
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
sys.exit(0)
keyinput = pygame.key.get_pressed()
if keyinput is not None:
# print keyinput
if keyinput[pygame.K_ESCAPE]:
sys.exit(1)
if keyinput[pygame.K_UP]:
viewer_distance += 1
if keyinput[pygame.K_DOWN]:
viewer_distance -= 1
if keyinput[pygame.K_PLUS]:
fov += .1
if keyinput[pygame.K_MINUS]:
fov -= .1
if keyinput[pygame.K_p]:
pause = not pause
if keyinput[pygame.K_r]:
viewer_distance = 256
fov = 2
if pause is not True:
surface.fill((0, 0, 0, 255))
for thing in spheres:
# rotate
thing.rotate(dx=theta, dy=theta, dz=0.0, offset2d=Vec2d(0, 0))
theta += step * 16
# color changing
thing.set_color(color=color)
# draw
thing.update(viewer_distance=viewer_distance, fov=fov)
pygame.display.flip()
except KeyboardInterrupt:
        print('shutting down')
if __name__ == "__main__":
test()
| gunny26/python-demoscene | Circle.py | Python | gpl-2.0 | 4,946 |
import sys, getopt, csv
from math import log
def main(argv):
try:
opts, args = getopt.getopt(argv, "w:h:r:c:",["ref=","cmp="])
except getopt.GetoptError:
print 'psnr.py -r <reference yuv> -c <comparing yuv>'
sys.exit(2)
width = 1920
height = 1080
ref_yuv = ''
cmp_yuv = ''
for opt, arg in opts:
        if opt == '-w':
            width = int(arg)
        elif opt == '-h':
            height = int(arg)
elif opt in ("-r", "--ref"):
ref_yuv = arg
elif opt in ("-c", "--cmp"):
cmp_yuv = arg
else:
print 'unknown opt: ', opt
if ref_yuv == '' or cmp_yuv == '':
print 'psnr.py -r <reference yuv> -c <comparing yuv>'
sys.exit(2)
print 'width: %d, height: %d' % (width, height)
print 'reference yuv: ', ref_yuv
print 'comparing yuv: ', cmp_yuv
with open(ref_yuv, 'rb') as ref, \
open(cmp_yuv, 'rb') as cmp, \
open('psnr.csv', 'wt') as psnr:
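        # YUV420 layout: the Y plane is width*height bytes per frame, U and V
        # are quarter size. Per-plane PSNR = 10 * log10(255^2 / MSE).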
while True:
            r = bytearray(ref.read(width * height));
            c = bytearray(cmp.read(width * height));
            if not r:
                # end of stream reached
                break
mse = reduce(lambda x, y: x + y, map(lambda x, y: (x - y) ** 2, r, c));
psnrY = 10 * log(255.0 * 255.0 / (float(mse) / width / height), 10)
r = bytearray(ref.read(width * height / 4));
c = bytearray(cmp.read(width * height / 4));
mse = reduce(lambda x, y: x + y, map(lambda x, y: (x - y) ** 2, r, c));
psnrU = 10 * log(255.0 * 255.0 / (float(mse) / width / height * 4), 10)
r = bytearray(ref.read(width * height / 4));
c = bytearray(cmp.read(width * height / 4));
mse = reduce(lambda x, y: x + y, map(lambda x, y: (x - y) ** 2, r, c));
psnrV = 10 * log(255.0 * 255.0 / (float(mse) / width / height * 4), 10)
print 'PSNR Y: %f, U: %f, V: %f' % (psnrY, psnrU, psnrV)
if __name__ == "__main__":
main(sys.argv[1:])
| dspmeng/code | scripts/psnr.py | Python | apache-2.0 | 1,982 |
from __future__ import annotations
from typing import TYPE_CHECKING, Container, cast
import numpy as np
from physt.binnings import BinningBase, as_binning
from physt.histogram1d import Histogram1D, ObjectWithBinning
if TYPE_CHECKING:
from typing import Any, Dict, Optional, Tuple
import physt
from physt.binnings import BinningLike
from physt.typing_aliases import ArrayLike
class HistogramCollection(Container[Histogram1D], ObjectWithBinning):
"""Experimental collection of histograms.
It contains (potentially name-addressable) 1-D histograms
with a shared binning.
"""
def __init__(
self,
*histograms: Histogram1D,
binning: Optional[BinningLike] = None,
title: Optional[str] = None,
name: Optional[str] = None,
):
self.histograms = list(histograms)
if histograms:
if binning:
raise ValueError(
"When creating collection from histograms, binning is deduced from them."
)
self._binning = histograms[0].binning
if not all(h.binning == self._binning for h in histograms):
raise ValueError("All histograms should share the same binning.")
else:
if binning is None:
raise ValueError("Either binning or at least one histogram must be provided.")
self._binning = as_binning(binning)
self.name = name
self.title = title or self.name
def __contains__(self, item):
try:
_ = self[item]
return True
except KeyError:
return False
def __iter__(self):
return iter(self.histograms)
def __len__(self):
return len(self.histograms)
def copy(self) -> "HistogramCollection":
# TODO: The binnings are probably not consistent in the copies
binning_copy = self.binning.copy()
histograms = [h.copy() for h in self.histograms]
for histogram in histograms:
histogram._binning = binning_copy
return HistogramCollection(*histograms, title=self.title, name=self.name)
@property
def binning(self) -> BinningBase:
return self._binning
@property
def axis_name(self) -> str:
return self.histograms[0].axis_name if self.histograms else "axis0"
@property
def axis_names(self) -> Tuple[str]:
return (self.axis_name,)
def add(self, histogram: Histogram1D) -> None:
"""Add a histogram to the collection."""
if self.binning and not self.binning == histogram.binning:
raise ValueError("Cannot add histogram with different binning.")
self.histograms.append(histogram)
def create(
self, name: str, values, *, weights=None, dropna: bool = True, **kwargs
) -> Histogram1D:
# TODO: Rename!
init_kwargs: Dict[str, Any] = {"axis_name": self.axis_name}
init_kwargs.update(kwargs)
histogram = Histogram1D(binning=self.binning, name=name, **init_kwargs)
histogram.fill_n(values, weights=weights, dropna=dropna)
self.histograms.append(histogram)
return histogram
def __getitem__(self, item) -> Histogram1D:
if isinstance(item, str):
candidates = [h for h in self.histograms if h.name == item]
if len(candidates) == 0:
raise KeyError(f"Collection does not contain histogram named '{item}'.")
return candidates[0]
else:
return self.histograms[item]
def __eq__(self, other) -> bool:
return (
(type(other) == HistogramCollection)
and (len(other) == len(self))
and all((h1 == h2) for h1, h2 in zip(self.histograms, other.histograms))
)
def normalize_bins(self, inplace: bool = False) -> "HistogramCollection":
"""Normalize each bin in the collection so that the sum is 1.0 for each bin.
Note: If a bin is zero in all collections, the result will be inf.
"""
col = self if inplace else self.copy()
sums = self.sum().frequencies
for h in col.histograms:
h.set_dtype(float)
h._frequencies /= sums
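            # Variances scale with the square of the factor applied to the
            # frequencies (assuming each bin is normalized independently).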
h._errors2 /= sums ** 2 # TODO: Does this make sense?
return col
def normalize_all(self, inplace: bool = False) -> "HistogramCollection":
"""Normalize all histograms so that total content of each of them is equal to 1.0."""
col = self if inplace else self.copy()
for h in col.histograms:
h.normalize(inplace=True)
return col
def sum(self) -> Histogram1D:
"""Return the sum of all contained histograms."""
if not self.histograms:
return Histogram1D(
data=np.zeros((self.binning.bin_count)), dtype=np.int64, binning=self.binning
)
return cast(Histogram1D, sum(self.histograms))
@property
def plot(self) -> "physt.plotting.PlottingProxy":
"""Proxy to plotting.
This attribute is a special proxy to plotting. In the most
simple cases, it can be used as a method. For more sophisticated
use, see the documentation for physt.plotting package.
"""
from physt.plotting import PlottingProxy
return PlottingProxy(self)
@classmethod
def multi_h1(cls, a_dict: Dict[str, ArrayLike], bins=None, **kwargs) -> "HistogramCollection":
"""Create a collection from multiple datasets."""
from physt.binnings import calculate_bins
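        # Pool every dataset's values so a single shared binning is computed
        # once and reused by all histograms in the collection.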
mega_values: np.ndarray = np.concatenate(list(a_dict.values())) # type: ignore
binning = calculate_bins(mega_values, bins, **kwargs)
title = kwargs.pop("title", None)
name = kwargs.pop("name", None)
collection = HistogramCollection(binning=binning, title=title, name=name)
for key, value in a_dict.items():
collection.create(key, value)
return collection
@classmethod
def from_dict(cls, a_dict: Dict[str, Any]) -> "HistogramCollection":
from physt.io import create_from_dict
histograms = (
cast(Histogram1D, create_from_dict(item, "HistogramCollection", check_version=False))
for item in a_dict["histograms"]
)
return HistogramCollection(*histograms)
def to_dict(self) -> Dict[str, Any]:
return {
"histogram_type": "histogram_collection",
"histograms": [h.to_dict() for h in self.histograms],
}
def to_json(self, path: Optional[str] = None, **kwargs) -> str:
"""Convert to JSON representation.
Parameters
----------
path: Where to write the JSON.
Returns
-------
The JSON representation.
"""
from .io import save_json
return save_json(self, path, **kwargs)
| janpipek/physt | physt/histogram_collection.py | Python | mit | 6,905 |
# -*- coding: utf-8-*-
# Route query
import sys
import os
import re
import json, urllib
from urllib import urlencode
import socket
reload(sys)
sys.setdefaultencoding('utf8')
class JpLearn(object):
mappings = {}
scenes = {}
"""docstring for JpLearn"""
def __init__(self):
self.mappings = {
'第1课':'lesson_1',
'第2课':'lesson_2',
'第3课':'lesson_3',
'第4课':'lesson_4',
'第5课':'lesson_5',
'第6课':'lesson_6',
'第7课':'lesson_7',
'第8课':'lesson_8',
'第9课':'lesson_9',
'第10课':'lesson_10',
'第11课':'lesson_11',
'第12课':'lesson_12',
'第13课':'lesson_13',
'第14课':'lesson_14',
'第15课':'lesson_15',
'第16课':'lesson_16',
'第17课':'lesson_17',
'第18课':'lesson_18',
'第19课':'lesson_19',
'第20课':'lesson_20',
'第21课':'lesson_21',
'第22课':'lesson_22',
'第23课':'lesson_23',
'第24课':'lesson_24',
'第25课':'lesson_25',
'第26课':'lesson_26',
'第27课':'lesson_27',
'第28课':'lesson_28',
'第29课':'lesson_29',
'第30课':'lesson_30',
'第31课':'lesson_31',
'第32课':'lesson_32',
'第33课':'lesson_33',
'第34课':'lesson_34',
'第35课':'lesson_35',
'第36课':'lesson_36',
'第37课':'lesson_37',
'第38课':'lesson_38',
'第39课':'lesson_39',
'第40课':'lesson_40',
'第41课':'lesson_41',
'第42课':'lesson_42',
'第43课':'lesson_43',
'第44课':'lesson_44',
'第45课':'lesson_45',
'第46课':'lesson_46',
'第47课':'lesson_47',
'第48课':'lesson_48',
}
self.scenes = {
'钢铁侠':'iron_man',
'蜘蛛侠':'spider_man',
}
self.manual = {
'片假名':'2821057061',
}
pass
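    # Hypothetical usage sketch (names taken from the mappings above):
    #   JpLearn().run('日语初级单词第1课', l_type='word')
    # strips the prefix, maps '第1课' to 'lesson_1', and returns the contents
    # of data/jp_word/lesson_1.txt.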
def run(self,text,prefix='',l_type='',use_local=False):
if l_type == 'grammar':
lesson = self.fenxiGrammarText(text)
try:
filename = self.mappings[lesson.encode('utf8')]
except Exception:
return ''
elif l_type == 'word':
lesson = self.fenxiWordText(text)
try:
filename = self.mappings[lesson.encode('utf8')]
except Exception:
return ''
elif l_type == 'scene':
lesson = self.fenxiSceneText(text)
try:
filename = self.mappings[lesson.encode('utf8')]
except Exception:
return ''
elif l_type == 'manual':
lesson = self.fenxiManualText(text)
try:
filename = self.manual[lesson.encode('utf8')]
except Exception:
return ''
use_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data/jp_'+l_type+'/'+filename+'.txt')
try:
if use_local:
myname = socket.getfqdn(socket.gethostname( ))
                # get the local IP address
myaddr = socket.gethostbyname(myname)
return '点击查看:http://'+myaddr+'/wap/quicksearch.html?f='+filename+'&s=jp&t='+l_type+'&ip='+myaddr
else:
res = ''
for line in open(use_path):
res = res + prefix +line
return res
except IOError:
return '找不到' + lesson
def fenxiWordText(self,text):
# PATTERN = ur'(日语初级){0,4}?[\u4e00-\u9fa5]{1,10}'
# data = '日语初级第N课'
PATTERN = ur'((?:日语初级单词)?[\u4e00-\u9fa50-9]{1,10})'
data_utf8 = text.decode('utf8')
pattern = re.compile(PATTERN)
m = pattern.search(data_utf8)
return m.group(1).replace('日语初级单词','')
def fenxiSceneText(self,text):
PATTERN = ur'((?:日语场景)?[\u4e00-\u9fa50-9]{1,10})'
data_utf8 = text.decode('utf8')
pattern = re.compile(PATTERN)
m = pattern.search(data_utf8)
return m.group(1).replace('日语场景','')
def fenxiManualText(self,text):
# PATTERN = ur'(日语初级){0,4}?[\u4e00-\u9fa5]{1,10}'
# data = '日语初级第N课'
PATTERN = ur'((?:日语手册)?[\u4e00-\u9fa50-9]{1,10})'
data_utf8 = text.decode('utf8')
pattern = re.compile(PATTERN)
m = pattern.search(data_utf8)
return m.group(1).replace('日语手册','').strip()
def fenxiGrammarText(self,text):
# PATTERN = ur'(日语初级){0,4}?[\u4e00-\u9fa5]{1,10}'
# data = '日语初级第N课'
PATTERN = ur'((?:日语初级文法)?[\u4e00-\u9fa50-9]{1,10})'
data_utf8 = text.decode('utf8')
pattern = re.compile(PATTERN)
m = pattern.search(data_utf8)
return m.group(1).replace('日语初级文法','') | masonyang/test | jplearn.py | Python | gpl-3.0 | 4,204 |
"""
Django settings for djasmine project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import jasmine_core
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o)-_2wabolqon7(o72)-p1lrizz6h&nifj!=#bz3pe1z_!pkx%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.staticfiles',
'djasmine',
]
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
]
ROOT_URLCONF = 'test_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR + '/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
],
},
},
]
WSGI_APPLICATION = 'test_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
JASMINE_SPEC_ROOT = BASE_DIR + '/spec'
STATIC_URL = '/static/'
STATICFILES_DIRS = [
jasmine_core.__path__[0],
JASMINE_SPEC_ROOT,
]
| tjwalch/djasmine | test_project/settings.py | Python | mit | 1,951 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEST_RUNNER = "django.test.runner.DiscoverRunner"
SECRET_KEY = 'i%&(axb!!5yfg6kv$m*ytf9i-0)z-&1y-wkmv^oz#6l&$*+!v6'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
}
| PetrDlouhy/django-gpxpy | tests/settings.py | Python | gpl-3.0 | 340 |
"""Test class for GPG Key CLI.
The gpg sub-command was deprecated in favour of content-credential in
Satellite 6.8
:Requirement: ContentCredential
:CaseAutomation: Automated
:CaseLevel: Component
:CaseComponent: ContentCredentials
:Assignee: swadeley
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from tempfile import mkstemp
import pytest
from fauxfactory import gen_alphanumeric
from fauxfactory import gen_choice
from fauxfactory import gen_integer
from fauxfactory import gen_string
from robottelo import ssh
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.content_credentials import ContentCredential
from robottelo.cli.factory import CLIFactoryError
from robottelo.cli.factory import make_content_credential
from robottelo.cli.factory import make_product
from robottelo.cli.factory import make_repository
from robottelo.cli.org import Org
from robottelo.cli.product import Product
from robottelo.cli.repository import Repository
from robottelo.constants import DEFAULT_ORG
from robottelo.constants import VALID_GPG_KEY_FILE
from robottelo.datafactory import invalid_values_list
from robottelo.datafactory import parametrized
from robottelo.datafactory import valid_data_list
from robottelo.helpers import get_data_file
VALID_GPG_KEY_FILE_PATH = get_data_file(VALID_GPG_KEY_FILE)
def create_gpg_key_file(content=None):
"""Creates a fake GPG Key file and returns its path or None if an error
happens.
"""
(_, key_filename) = mkstemp(text=True)
if not content:
content = gen_alphanumeric(gen_integer(20, 50))
with open(key_filename, "w") as gpg_key_file:
gpg_key_file.write(content)
return key_filename
return None
search_key = 'name'
@pytest.mark.build_sanity
@pytest.mark.tier1
def test_verify_gpg_key_content_displayed(module_org):
"""content-credential info should display key content
:id: 0ee87ee0-8bf1-4d15-b5f9-0ac364e61155
:expectedresults: content-credentials info should display key content
:CaseImportance: Critical
"""
# Setup a new key file
content = gen_alphanumeric()
key_path = create_gpg_key_file(content=content)
assert key_path, 'GPG Key file must be created'
gpg_key = make_content_credential(
{'path': key_path, 'name': gen_string('alpha'), 'organization-id': module_org.id}
)
assert gpg_key['content'] == content
@pytest.mark.tier1
def test_positive_get_info_by_name(module_org):
"""Create single gpg key and get its info by name
:id: 890456ea-0b31-4386-9231-f47572f26d08
:expectedresults: specific information for GPG key matches the creation
name
:CaseImportance: Critical
"""
name = gen_string('utf8')
gpg_key = make_content_credential(
{'key': VALID_GPG_KEY_FILE_PATH, 'name': name, 'organization-id': module_org.id}
)
gpg_key = ContentCredential.info({'name': gpg_key['name'], 'organization-id': module_org.id})
assert gpg_key['name'] == name
@pytest.mark.parametrize('name', **parametrized(valid_data_list()))
@pytest.mark.tier1
def test_positive_create_with_default_org(name, module_org, default_org):
"""Create gpg key with valid name and valid gpg key via file
import using the default created organization
:id: 4265dfd1-dc64-4119-8a64-8724b09d6fb7
:parametrized: yes
:expectedresults: gpg key is created
:CaseImportance: Critical
"""
org = Org.info({'name': DEFAULT_ORG})
gpg_key = make_content_credential(
{'key': VALID_GPG_KEY_FILE_PATH, 'name': name, 'organization-id': org['id']}
)
# Can we find the new object?
result = ContentCredential.exists(
{'organization-id': org['id']}, (search_key, gpg_key[search_key])
)
assert gpg_key[search_key] == result[search_key]
@pytest.mark.parametrize('name', **parametrized(valid_data_list()))
@pytest.mark.tier1
def test_positive_create_with_custom_org(name, module_org):
"""Create gpg key with valid name and valid gpg key via file
import using a new organization
:id: 10dd9fc0-e088-4cf1-9fb6-24fe04df2895
:parametrized: yes
:expectedresults: gpg key is created
:CaseImportance: Critical
"""
gpg_key = make_content_credential(
{
'key': VALID_GPG_KEY_FILE_PATH,
'name': name,
'organization-id': module_org.id,
}
)
# Can we find the new object?
result = ContentCredential.exists(
{'organization-id': module_org.id},
(search_key, gpg_key[search_key]),
)
assert gpg_key[search_key] == result[search_key]
@pytest.mark.tier1
def test_negative_create_with_same_name(module_org):
"""Create gpg key with valid name and valid gpg key via file
import then try to create new one with same name
:id: 8751745c-5cf6-42f7-8fbd-6c23119da486
:expectedresults: gpg key is not created
:CaseImportance: Critical
"""
name = gen_string('alphanumeric')
gpg_key = make_content_credential({'name': name, 'organization-id': module_org.id})
# Can we find the new object?
result = ContentCredential.exists(
{'organization-id': module_org.id}, (search_key, gpg_key[search_key])
)
assert gpg_key[search_key] == result[search_key]
with pytest.raises(CLIFactoryError):
make_content_credential({'name': name, 'organization-id': module_org.id})
@pytest.mark.parametrize('name', **parametrized(valid_data_list()))
@pytest.mark.tier1
def test_negative_create_with_no_gpg_key(name, module_org):
"""Create gpg key with valid name and no gpg key
:id: bbfd5306-cfe7-40c1-a3a2-35834108163c
:parametrized: yes
:expectedresults: gpg key is not created
:CaseImportance: Critical
"""
with pytest.raises(CLIReturnCodeError):
ContentCredential.create({'name': name, 'organization-id': module_org.id})
@pytest.mark.parametrize('name', **parametrized(invalid_values_list()))
@pytest.mark.tier1
def test_negative_create_with_invalid_name(name, module_org):
"""Create gpg key with invalid name and valid gpg key via
file import
:id: fbbaf8a5-1570-4910-9f6a-baa35b15d2ad
:parametrized: yes
:expectedresults: gpg key is not created
:CaseImportance: Critical
"""
with pytest.raises(CLIFactoryError):
# factory will provide a valid key
make_content_credential({'name': name, 'organization-id': module_org.id})
@pytest.mark.parametrize('name', **parametrized(valid_data_list()))
@pytest.mark.tier1
@pytest.mark.upgrade
def test_positive_delete(name, module_org):
"""Create gpg key with valid name and valid gpg key via file
import then delete it
:id: 9640cabc-e0c3-41a0-b4de-99b06bf51c02
:parametrized: yes
:expectedresults: gpg key is deleted
:CaseImportance: Critical
"""
gpg_key = make_content_credential({'name': name, 'organization-id': module_org.id})
result = ContentCredential.exists(
{'organization-id': module_org.id},
(search_key, gpg_key[search_key]),
)
assert gpg_key[search_key] == result[search_key]
ContentCredential.delete({'name': name, 'organization-id': module_org.id})
result = ContentCredential.exists(
{'organization-id': module_org.id},
(search_key, gpg_key[search_key]),
)
assert (len(result)) == 0
@pytest.mark.parametrize('new_name', **parametrized(valid_data_list()))
@pytest.mark.tier1
def test_positive_update_name(new_name, module_org):
"""Create gpg key with valid name and valid gpg key via file
import then update its name
:id: f3bb254d-f831-4f86-944a-26d9a36bd906
:parametrized: yes
:expectedresults: gpg key is updated
:CaseImportance: Critical
"""
gpg_key = make_content_credential({'organization-id': module_org.id})
ContentCredential.update(
{
'name': gpg_key['name'],
'new-name': new_name,
'organization-id': module_org.id,
}
)
gpg_key = ContentCredential.info({'name': new_name, 'organization-id': module_org.id})
@pytest.mark.parametrize('name', **parametrized(valid_data_list()))
@pytest.mark.tier1
def test_positive_update_key(name, module_org):
"""Create gpg key with valid name and valid gpg key via file
import then update its gpg key file
:id: d3a72892-3414-4178-98b7-e0780d9b6587
:parametrized: yes
:expectedresults: gpg key is updated
:CaseImportance: Critical
"""
gpg_key = make_content_credential({'organization-id': module_org.id})
content = gen_alphanumeric(gen_integer(20, 50))
assert gpg_key['content'] != content
local_key = create_gpg_key_file(content)
    assert local_key, 'GPG Key file must be created'
key = '/tmp/%s' % gen_alphanumeric()
ssh.upload_file(local_file=local_key, remote_file=key)
ContentCredential.update(
{'path': key, 'name': gpg_key['name'], 'organization-id': module_org.id}
)
gpg_key = ContentCredential.info({'name': gpg_key['name'], 'organization-id': module_org.id})
assert gpg_key['content'] == content
@pytest.mark.parametrize('new_name', **parametrized(invalid_values_list()))
@pytest.mark.tier1
def test_negative_update_name(new_name, module_org):
"""Create gpg key with valid name and valid gpg key via file
import then fail to update its name
:id: 98cda40a-49d0-42ce-91a6-31fa7b7f330b
:parametrized: yes
:expectedresults: gpg key is not updated
:CaseImportance: Critical
"""
gpg_key = make_content_credential({'organization-id': module_org.id})
with pytest.raises(CLIReturnCodeError):
ContentCredential.update(
{
'name': gpg_key['name'],
'new-name': new_name,
'organization-id': module_org.id,
}
)
@pytest.mark.tier2
def test_positive_add_empty_product(module_org):
"""Create gpg key with valid name and valid gpg key via file
import then associate it with empty (no repos) custom product
:id: 61c700db-43ab-4b8c-8527-f4cfc085afaa
:expectedresults: gpg key is associated with product
:CaseLevel: Integration
"""
gpg_key = make_content_credential({'organization-id': module_org.id})
product = make_product({'gpg-key-id': gpg_key['id'], 'organization-id': module_org.id})
assert product['gpg']['gpg-key'] == gpg_key['name']
@pytest.mark.tier2
def test_positive_add_product_with_repo(module_org):
"""Create gpg key with valid name and valid gpg key via file
import then associate it with custom product that has one repository
:id: f315eadd-e65b-4952-912f-f640867ad656
:expectedresults: gpg key is associated with product as well as with
the repository
:CaseLevel: Integration
"""
product = make_product({'organization-id': module_org.id})
repo = make_repository({'product-id': product['id']})
gpg_key = make_content_credential({'organization-id': module_org.id})
Product.update(
{'gpg-key-id': gpg_key['id'], 'id': product['id'], 'organization-id': module_org.id}
)
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
repo = Repository.info({'id': repo['id']})
assert product['gpg']['gpg-key-id'] == gpg_key['id']
assert repo['gpg-key']['id'] == gpg_key['id']
@pytest.mark.tier2
def test_positive_add_product_with_repos(module_org):
"""Create gpg key with valid name and valid gpg key via file
import then associate it with custom product that has more than one
repository
:id: 76683f3e-7705-4719-996e-c026839053bb
:expectedresults: gpg key is associated with product as well as with
the repositories
:CaseLevel: Integration
"""
product = make_product({'organization-id': module_org.id})
repos = [make_repository({'product-id': product['id']}) for _ in range(gen_integer(2, 5))]
gpg_key = make_content_credential({'organization-id': module_org.id})
Product.update(
{'gpg-key-id': gpg_key['id'], 'id': product['id'], 'organization-id': module_org.id}
)
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
assert product['gpg']['gpg-key-id'] == gpg_key['id']
for repo in repos:
repo = Repository.info({'id': repo['id']})
assert repo['gpg-key']['id'] == gpg_key['id']
@pytest.mark.tier2
def test_positive_add_repo_from_product_with_repo(module_org):
"""Create gpg key with valid name and valid gpg key via file
import then associate it to repository from custom product that has
one repository
:id: da568a0e-69b1-498e-a747-6881aac7409e
:expectedresults: gpg key is associated with the repository but not
with the product
:CaseLevel: Integration
"""
product = make_product({'organization-id': module_org.id})
repo = make_repository({'product-id': product['id']})
gpg_key = make_content_credential({'organization-id': module_org.id})
Repository.update(
{'gpg-key-id': gpg_key['id'], 'id': repo['id'], 'organization-id': module_org.id}
)
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
repo = Repository.info({'id': repo['id']})
assert repo['gpg-key']['id'] == gpg_key['id']
assert product['gpg'].get('gpg-key-id') != gpg_key['id']
@pytest.mark.tier2
def test_positive_add_repo_from_product_with_repos(module_org):
"""Create gpg key via file import and associate with custom repo
GPGKey should contain valid name and valid key and should be associated
to one repository from custom product. Make sure custom product should
have more than one repository.
:id: e3019a61-ec32-4044-9087-e420b8db4e09
:expectedresults: gpg key is associated with the repository
:CaseLevel: Integration
"""
product = make_product({'organization-id': module_org.id})
repos = [make_repository({'product-id': product['id']}) for _ in range(gen_integer(2, 5))]
gpg_key = make_content_credential({'organization-id': module_org.id})
Repository.update(
{'gpg-key-id': gpg_key['id'], 'id': repos[0]['id'], 'organization-id': module_org.id}
)
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
assert product['gpg'].get('gpg-key-id') != gpg_key['id']
# First repo should have a valid gpg key assigned
repo = Repository.info({'id': repos.pop(0)['id']})
assert repo['gpg-key']['id'] == gpg_key['id']
# The rest of repos should not
for repo in repos:
repo = Repository.info({'id': repo['id']})
assert repo['gpg-key'].get('id') != gpg_key['id']
@pytest.mark.tier2
def test_positive_update_key_for_empty_product(module_org):
"""Create gpg key with valid name and valid gpg key via file
import then associate it with empty (no repos) custom product then
update the key
:id: 13aa6e0c-4255-483a-af33-ea7e82ee7766
:expectedresults: gpg key is associated with product before/after
update
:CaseLevel: Integration
"""
# Create a product and a gpg key
product = make_product({'organization-id': module_org.id})
gpg_key = make_content_credential({'organization-id': module_org.id})
# Associate gpg key with a product
Product.update(
{'gpg-key-id': gpg_key['id'], 'id': product['id'], 'organization-id': module_org.id}
)
# Verify gpg key was associated
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
assert product['gpg']['gpg-key'] == gpg_key['name']
# Update the gpg key
new_name = gen_choice(list(valid_data_list().values()))
ContentCredential.update(
{'name': gpg_key['name'], 'new-name': new_name, 'organization-id': module_org.id}
)
# Verify changes are reflected in the gpg key
gpg_key = ContentCredential.info({'id': gpg_key['id'], 'organization-id': module_org.id})
assert gpg_key['name'] == new_name
# Verify changes are reflected in the product
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
assert product['gpg']['gpg-key'] == new_name
@pytest.mark.tier2
def test_positive_update_key_for_product_with_repo(module_org):
"""Create gpg key with valid name and valid gpg key via file
import then associate it with custom product that has one repository
then update the key
:id: 1f8f943c-a611-4ed2-9d8a-770f60a549a7
:expectedresults: gpg key is associated with product before/after
update as well as with the repository
:CaseLevel: Integration
"""
# Create a product and a gpg key
product = make_product({'organization-id': module_org.id})
gpg_key = make_content_credential({'organization-id': module_org.id})
# Create a repository and assign it to the product
repo = make_repository({'product-id': product['id']})
# Associate gpg key with a product
Product.update(
{'gpg-key-id': gpg_key['id'], 'id': product['id'], 'organization-id': module_org.id}
)
# Verify gpg key was associated
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
repo = Repository.info({'id': repo['id']})
assert product['gpg']['gpg-key'] == gpg_key['name']
assert repo['gpg-key'].get('name') == gpg_key['name']
# Update the gpg key
new_name = gen_choice(list(valid_data_list().values()))
ContentCredential.update(
{'name': gpg_key['name'], 'new-name': new_name, 'organization-id': module_org.id}
)
# Verify changes are reflected in the gpg key
gpg_key = ContentCredential.info({'id': gpg_key['id'], 'organization-id': module_org.id})
assert gpg_key['name'] == new_name
# Verify changes are reflected in the product
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
assert product['gpg']['gpg-key'] == new_name
# Verify changes are reflected in the repository
repo = Repository.info({'id': repo['id']})
assert repo['gpg-key'].get('id') == gpg_key['id']
@pytest.mark.tier2
def test_positive_update_key_for_product_with_repos(module_org):
"""Create gpg key with valid name and valid gpg key via file
import then associate it with custom product that has more than one
repository then update the key
:id: 8aa3dc75-6257-48ae-b3f9-c617e323b47a
:expectedresults: gpg key is associated with product before/after
update as well as with the repositories
:CaseLevel: Integration
"""
# Create a product and a gpg key
product = make_product({'organization-id': module_org.id})
gpg_key = make_content_credential({'organization-id': module_org.id})
# Create repositories and assign them to the product
repos = [make_repository({'product-id': product['id']}) for _ in range(gen_integer(2, 5))]
# Associate gpg key with a product
Product.update(
{'gpg-key-id': gpg_key['id'], 'id': product['id'], 'organization-id': module_org.id}
)
# Verify gpg key was associated
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
assert product['gpg']['gpg-key'] == gpg_key['name']
for repo in repos:
repo = Repository.info({'id': repo['id']})
assert repo['gpg-key'].get('name') == gpg_key['name']
# Update the gpg key
new_name = gen_choice(list(valid_data_list().values()))
ContentCredential.update(
{'name': gpg_key['name'], 'new-name': new_name, 'organization-id': module_org.id}
)
# Verify changes are reflected in the gpg key
gpg_key = ContentCredential.info({'id': gpg_key['id'], 'organization-id': module_org.id})
assert gpg_key['name'] == new_name
# Verify changes are reflected in the product
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
assert product['gpg']['gpg-key'] == new_name
# Verify changes are reflected in the repositories
for repo in repos:
repo = Repository.info({'id': repo['id']})
assert repo['gpg-key'].get('name') == new_name
@pytest.mark.tier2
def test_positive_update_key_for_repo_from_product_with_repo(module_org):
"""Create gpg key with valid name and valid gpg key via file
import then associate it to repository from custom product that has
one repository then update the key
:id: 2fee5f35-6e0e-4b7c-8a8a-46ee1e77919d
:expectedresults: gpg key is associated with the repository
before/after update, but not with the product
:CaseLevel: Integration
"""
# Create a product and a gpg key
product = make_product({'organization-id': module_org.id})
gpg_key = make_content_credential({'organization-id': module_org.id})
# Create repository, assign product and gpg-key
repo = make_repository({'gpg-key-id': gpg_key['id'], 'product-id': product['id']})
# Verify gpg key was associated
assert repo['gpg-key'].get('name') == gpg_key['name']
# Update the gpg key
new_name = gen_choice(list(valid_data_list().values()))
ContentCredential.update(
{'name': gpg_key['name'], 'new-name': new_name, 'organization-id': module_org.id}
)
# Verify changes are reflected in the gpg key
gpg_key = ContentCredential.info({'id': gpg_key['id'], 'organization-id': module_org.id})
assert gpg_key['name'] == new_name
# Verify changes are reflected in the repositories
repo = Repository.info({'id': repo['id']})
assert repo['gpg-key'].get('name') == new_name
# Verify gpg key wasn't added to the product
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
assert product['gpg']['gpg-key'] != new_name
@pytest.mark.tier2
def test_positive_update_key_for_repo_from_product_with_repos(module_org):
"""Create gpg key with valid name and valid gpg key via file
import then associate it to repository from custom product that has
more than one repository then update the key
:id: c548ed4f-7f2d-456f-a644-7597644f6457
:expectedresults: gpg key is associated with a single repository
before/after update and not associated with product or other
repositories
:CaseLevel: Integration
"""
# Create a product and a gpg key
product = make_product({'organization-id': module_org.id})
gpg_key = make_content_credential({'organization-id': module_org.id})
# Create repositories and assign them to the product
repos = [make_repository({'product-id': product['id']}) for _ in range(gen_integer(2, 5))]
# Associate gpg key with a single repository
Repository.update(
{'gpg-key-id': gpg_key['id'], 'id': repos[0]['id'], 'organization-id': module_org.id}
)
# Verify gpg key was associated
repos[0] = Repository.info({'id': repos[0]['id']})
assert repos[0]['gpg-key']['name'] == gpg_key['name']
# Update the gpg key
new_name = gen_choice(list(valid_data_list().values()))
ContentCredential.update(
{'name': gpg_key['name'], 'new-name': new_name, 'organization-id': module_org.id}
)
# Verify changes are reflected in the gpg key
gpg_key = ContentCredential.info({'id': gpg_key['id'], 'organization-id': module_org.id})
assert gpg_key['name'] == new_name
# Verify changes are reflected in the associated repository
repos[0] = Repository.info({'id': repos[0]['id']})
assert repos[0]['gpg-key'].get('name') == new_name
# Verify changes are not reflected in the product
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
assert product['gpg']['gpg-key'] != new_name
# Verify changes are not reflected in the rest of repositories
for repo in repos[1:]:
repo = Repository.info({'id': repo['id']})
assert repo['gpg-key'].get('name') != new_name
@pytest.mark.tier2
def test_positive_delete_key_for_empty_product(module_org):
"""Create gpg key with valid name and valid gpg key via file
import then associate it with empty (no repos) custom product
then delete it
:id: 238a80f8-983a-4fd5-a168-6ef9442e2b1c
:expectedresults: gpg key is associated with product during creation
but removed from product after deletion
:CaseLevel: Integration
"""
# Create a product and a gpg key
gpg_key = make_content_credential({'organization-id': module_org.id})
product = make_product({'gpg-key-id': gpg_key['id'], 'organization-id': module_org.id})
# Verify gpg key was associated
assert product['gpg']['gpg-key'] == gpg_key['name']
# Delete the gpg key
ContentCredential.delete({'name': gpg_key['name'], 'organization-id': module_org.id})
# Verify gpg key was actually deleted
with pytest.raises(CLIReturnCodeError):
ContentCredential.info({'id': gpg_key['id'], 'organization-id': module_org.id})
# Verify gpg key was disassociated from the product
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
assert product['gpg']['gpg-key'] != gpg_key['name']
@pytest.mark.tier2
@pytest.mark.upgrade
def test_positive_delete_key_for_product_with_repo(module_org):
"""Create gpg key with valid name and valid gpg key via file
import then associate it with custom product that has one repository
then delete it
:id: 1e98e588-8b5d-475c-ad84-5d566df5619c
:expectedresults: gpg key is associated with product but and its
repository during creation but removed from product and repository
after deletion
:CaseLevel: Integration
"""
# Create product, repository and gpg key
product = make_product({'organization-id': module_org.id})
repo = make_repository({'product-id': product['id']})
gpg_key = make_content_credential({'organization-id': module_org.id})
# Associate gpg key with a product
Product.update(
{'gpg-key-id': gpg_key['id'], 'id': product['id'], 'organization-id': module_org.id}
)
# Verify gpg key was associated both with product and its repository
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
repo = Repository.info({'id': repo['id']})
assert product['gpg']['gpg-key'] == gpg_key['name']
assert repo['gpg-key'].get('name') == gpg_key['name']
# Delete the gpg key
ContentCredential.delete({'name': gpg_key['name'], 'organization-id': module_org.id})
# Verify gpg key was actually deleted
with pytest.raises(CLIReturnCodeError):
ContentCredential.info({'id': gpg_key['id'], 'organization-id': module_org.id})
# Verify gpg key was disassociated from the product and its repository
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
repo = Repository.info({'id': repo['id']})
assert product['gpg']['gpg-key'] != gpg_key['name']
assert repo['gpg-key'].get('name') != gpg_key['name']
@pytest.mark.tier2
def test_positive_delete_key_for_product_with_repos(module_org):
"""Create gpg key with valid name and valid gpg key via file
import then associate it with custom product that has more than one
repository then delete it
:id: 3848441f-746a-424c-afc3-4d5a15888af8
:expectedresults: gpg key is associated with product and its
repositories during creation but removed from the product and the
repositories after deletion
:CaseLevel: Integration
"""
# Create product, repositories and gpg key
product = make_product({'organization-id': module_org.id})
repos = [make_repository({'product-id': product['id']}) for _ in range(gen_integer(2, 5))]
gpg_key = make_content_credential({'organization-id': module_org.id})
# Associate gpg key with a product
Product.update(
{'gpg-key-id': gpg_key['id'], 'id': product['id'], 'organization-id': module_org.id}
)
# Verify gpg key was associated with product and its repositories
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
assert product['gpg']['gpg-key'] == gpg_key['name']
for repo in repos:
repo = Repository.info({'id': repo['id']})
assert repo['gpg-key'].get('name') == gpg_key['name']
# Delete the gpg key
ContentCredential.delete({'name': gpg_key['name'], 'organization-id': module_org.id})
# Verify gpg key was actually deleted
with pytest.raises(CLIReturnCodeError):
ContentCredential.info({'id': gpg_key['id'], 'organization-id': module_org.id})
# Verify gpg key was disassociated from the product and its
# repositories
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
assert product['gpg']['gpg-key'] != gpg_key['name']
for repo in repos:
repo = Repository.info({'id': repo['id']})
assert repo['gpg-key'].get('name') != gpg_key['name']
@pytest.mark.tier2
def test_positive_delete_key_for_repo_from_product_with_repo(module_org):
"""Create gpg key with valid name and valid gpg key via file
import then associate it to repository from custom product that has
one repository then delete the key
:id: 2555b08f-8cee-4e84-8f4d-9b46743f5758
:expectedresults: gpg key is associated with the single repository but
not the product during creation and was removed from repository
after deletion
:CaseLevel: Integration
"""
# Create product, repository and gpg key
product = make_product({'organization-id': module_org.id})
repo = make_repository({'product-id': product['id']})
gpg_key = make_content_credential({'organization-id': module_org.id})
# Associate gpg key with a repository
Repository.update(
{'gpg-key-id': gpg_key['id'], 'id': repo['id'], 'organization-id': module_org.id}
)
# Verify gpg key was associated with the repository but not with the
# product
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
repo = Repository.info({'id': repo['id']})
assert product['gpg']['gpg-key'] != gpg_key['name']
assert repo['gpg-key'].get('name') == gpg_key['name']
# Delete the gpg key
ContentCredential.delete({'name': gpg_key['name'], 'organization-id': module_org.id})
# Verify gpg key was actually deleted
with pytest.raises(CLIReturnCodeError):
ContentCredential.info({'id': gpg_key['id'], 'organization-id': module_org.id})
# Verify gpg key was disassociated from the repository
repo = Repository.info({'id': repo['id']})
assert repo['gpg-key'].get('name') != gpg_key['name']
@pytest.mark.tier2
def test_positive_delete_key_for_repo_from_product_with_repos(module_org):
"""Create gpg key with valid name and valid gpg key via file
import then associate it to repository from custom product that has
more than one repository then delete the key
:id: 7d6a278b-1063-4e72-bc32-ca60bd17bb84
:expectedresults: gpg key is associated with a single repository but
not the product during creation and removed from repository after
deletion
:CaseLevel: Integration
"""
# Create product, repositories and gpg key
product = make_product({'organization-id': module_org.id})
repos = []
for _ in range(gen_integer(2, 5)):
repos.append(make_repository({'product-id': product['id']}))
gpg_key = make_content_credential({'organization-id': module_org.id})
# Associate gpg key with a repository
Repository.update(
{'gpg-key-id': gpg_key['id'], 'id': repos[0]['id'], 'organization-id': module_org.id}
)
# Verify gpg key was associated with the repository
repos[0] = Repository.info({'id': repos[0]['id']})
assert repos[0]['gpg-key']['name'] == gpg_key['name']
# Delete the gpg key
ContentCredential.delete({'name': gpg_key['name'], 'organization-id': module_org.id})
# Verify gpg key was actually deleted
with pytest.raises(CLIReturnCodeError):
ContentCredential.info({'id': gpg_key['id'], 'organization-id': module_org.id})
# Verify gpg key is not associated with any repository or the product
# itself
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
assert product['gpg']['gpg-key'] != gpg_key['name']
for repo in repos:
repo = Repository.info({'id': repo['id']})
assert repo['gpg-key'].get('name') != gpg_key['name']
@pytest.mark.tier1
def test_positive_list(module_org):
"""Create gpg key and list it
:id: ca69e23b-ca96-43dd-89a6-55b0e4ea322d
:expectedresults: gpg key is in the list
:CaseImportance: Critical
"""
gpg_key = make_content_credential(
{'key': VALID_GPG_KEY_FILE_PATH, 'organization-id': module_org.id}
)
gpg_keys_list = ContentCredential.list({'organization-id': module_org.id})
assert gpg_key['id'] in [gpg['id'] for gpg in gpg_keys_list]
@pytest.mark.parametrize('name', **parametrized(valid_data_list()))
@pytest.mark.tier1
def test_positive_search(name, module_org):
"""Create gpg key and search for it
:id: f72648f1-b468-4662-9653-3464e7d0c349
:parametrized: yes
:expectedresults: gpg key can be found
:CaseImportance: Critical
"""
gpg_key = make_content_credential(
{
'key': VALID_GPG_KEY_FILE_PATH,
'name': name,
'organization-id': module_org.id,
}
)
# Can we find the new object?
result = ContentCredential.exists(
{'organization-id': module_org.id}, search=('name', gpg_key['name'])
)
assert gpg_key['name'] == result['name']
| jyejare/robottelo | tests/foreman/cli/test_content_credentials.py | Python | gpl-3.0 | 33,540 |
import sys
import event
class ducklingScriptParser():
    """
    Parses mission script strings into lists of timed game events.
    """
def __init__(self):
pass
@staticmethod
def splitEvent(strEvent):
"""
@param strEvent:
@return:
"""
i = 0
while strEvent[i].isdigit():
i += 1
return strEvent[0:i], strEvent[i:]
    @staticmethod
    def convertTime(timeStr):
        """
        Convert a 3- or 4-digit time string (minutes then seconds) to seconds.
        @param timeStr: time as 'mss' or 'mmss'
        @return: time in seconds
        """
if len(timeStr) == 3:
return int(timeStr[0]) * 60 + int(timeStr[1:3])
elif len(timeStr) == 4:
return int(timeStr[0:2]) * 60 + int(timeStr[2:4])
        print 'Error: invalid time string: ' + timeStr
        sys.exit(1)
@staticmethod
def strThreatToEventThreat(strThreat):
"""
@param strThreat:
@return:
"""
if strThreat == 'T':
return 'threat_normal'
elif strThreat == 'ST':
return 'threat_serious'
elif strThreat == 'IT':
return 'internal_normal'
elif strThreat == 'SIT':
return 'internal_serious'
else:
print "ERROR unkown threat code, make it a proper exception!"
print "threatstr: " + strThreat
@staticmethod
def strZonetoEventZone(strZone):
"""
@param strZone:
@return:
"""
if strZone == 'R':
return 'zone_red'
elif strZone == 'W':
return 'zone_white'
elif strZone == 'B':
return 'zone_blue'
else:
print "ERROR unkown zone code, make it a proper exception!"
print "threatstr: " + strZone
def parseEventStr(self, eventStr):
"""
@param eventStr:
@return:
"""
eventList = []
(timeStr, eventStr) = self.splitEvent(eventStr)
time = self.convertTime(timeStr)
strType = eventStr[0:2]
strParams = eventStr[2:]
if strType == "PE":
eventList.append((time - 60, event.phaseEnds(int(strParams), '1min')))
eventList.append((time - 20, event.phaseEnds(int(strParams), '20s')))
eventList.append((time - 7, event.phaseEnds(int(strParams), 'now')))
elif strType == "AL":
turn = int(strParams[0])
threat = self.strThreatToEventThreat(strParams[1:len(strParams) - 1])
zone = self.strZonetoEventZone(strParams[-1])
eventList.append((time, event.alert(turn, threat, zone)))
elif strType == "UR":
turn = int(strParams[0])
threat = self.strThreatToEventThreat(strParams[1:len(strParams) - 1])
zone = self.strZonetoEventZone(strParams[-1])
eventList.append((time, event.alert(turn, threat, zone, True)))
elif strType == "ID":
eventList.append((time, event.incomingData()))
elif strType == "DT":
eventList.append((time, event.dataTransfer()))
elif strType == "CS":
eventList.append((time, event.communicationSystemsDown(int(strParams))))
else:
            print 'error: unknown event type in script. TODO: make a proper exception from this.'
            print 'eventtype: ' + strType
#return the events
return eventList
def convertScript(self, script):
"""
@param script:
@return:
"""
lijst = script.split(',')
eventList = [(0, event.start())]
for eventStr in lijst:
events = self.parseEventStr(eventStr)
eventList.extend(events)
#Replace the last phase ends with mission ends
lastEvent = eventList[-1][1]
        if isinstance(lastEvent, event.phaseEnds):
lastPhaseNumber = lastEvent.getPhaseNumber()
for time, eventItem in eventList:
if isinstance(eventItem, event.phaseEnds):
if eventItem.getPhaseNumber() == lastPhaseNumber:
eventItem.convertToEndMission()
else:
print 'ERROR, the last event is not a phase end!'
return eventList
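# A minimal usage sketch (the script string below is made up for
# illustration): "130AL1TR" queues a normal-threat alert for turn 1 in the
# red zone at t=90s, and "400PE1" queues the phase-1 end warnings at
# t=180s, t=220s and t=233s (one minute, 20 seconds and 7 seconds before
# the 4:00 mark).
#
# parser = ducklingScriptParser()
# events = parser.convertScript("130AL1TR,400PE1")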
| nlaurens/SpaceAlert | ducklingScriptParser.py | Python | mit | 4,232 |
# Python library for Remember The Milk API
__author__ = 'Sridhar Ratnakumar <http://nearfar.org/>'
__all__ = (
'API',
'createRTM',
'set_log_level',
)
import urllib.request
import urllib.parse
import urllib.error
from hashlib import md5
from GTG import _
from GTG.tools.logger import Log
_use_jsonlib = False
try:
import simplejson as json
assert json
_use_jsonlib = True
except ImportError:
try:
import json as json
assert json
_use_jsonlib = True
except ImportError:
try:
from django.utils import simplejson as json
_use_jsonlib = True
except ImportError:
pass
if not _use_jsonlib:
Log.warning("simplejson module is not available, "
"falling back to the internal JSON parser. "
"Please consider installing the simplejson module from "
"http://pypi.python.org/pypi/simplejson.")
SERVICE_URL = 'http://api.rememberthemilk.com/services/rest/'
AUTH_SERVICE_URL = 'http://www.rememberthemilk.com/services/auth/'
class RTMError(Exception):
pass
class RTMAPIError(RTMError):
pass
class AuthStateMachine(object):
class NoData(RTMError):
pass
def __init__(self, states):
self.states = states
self.data = {}
def dataReceived(self, state, datum):
if state not in self.states:
error_string = _("Invalid state") + " <%s>"
raise RTMError(error_string % state)
self.data[state] = datum
def get(self, state):
if state in self.data:
return self.data[state]
else:
raise AuthStateMachine.NoData('No data for <%s>' % state)
class RTM(object):
def __init__(self, apiKey, secret, token=None):
self.apiKey = apiKey
self.secret = secret
self.authInfo = AuthStateMachine(['frob', 'token'])
# this enables one to do 'rtm.tasks.getList()', for example
for prefix, methods in list(API.items()):
setattr(self, prefix,
RTMAPICategory(self, prefix, methods))
if token:
self.authInfo.dataReceived('token', token)
def _sign(self, params):
"Sign the parameters with MD5 hash"
pairs = ''.join(['%s%s' % (k, v) for k, v in sortedItems(params)])
return md5((self.secret + pairs).encode('utf-8')).hexdigest()
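    # Worked example of the scheme above (values are made up): params
    # {'api_key': 'abc', 'method': 'rtm.test.echo'} sort and concatenate to
    # 'api_keyabcmethodrtm.test.echo'; prepending the shared secret and
    # taking the MD5 hex digest yields the value sent as 'api_sig'.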
def get(self, **params):
"Get the XML response for the passed `params`."
params['api_key'] = self.apiKey
params['format'] = 'json'
params['api_sig'] = self._sign(params)
json_data = openURL(SERVICE_URL, params).read()
# LOG.debug("JSON response: \n%s" % json)
if _use_jsonlib:
data = dottedDict('ROOT', json.loads(json_data.decode('utf-8')))
else:
data = dottedJSON(json_data.decode('utf-8'))
rsp = data.rsp
if rsp.stat == 'fail':
raise RTMAPIError('API call failed - %s (%s)' % (
rsp.err.msg, rsp.err.code))
else:
return rsp
def getNewFrob(self):
rsp = self.get(method='rtm.auth.getFrob')
self.authInfo.dataReceived('frob', rsp.frob)
return rsp.frob
def getAuthURL(self):
try:
frob = self.authInfo.get('frob')
except AuthStateMachine.NoData:
frob = self.getNewFrob()
params = {
'api_key': self.apiKey,
'perms': 'delete',
'frob': frob
}
params['api_sig'] = self._sign(params)
return AUTH_SERVICE_URL + '?' + urllib.parse.urlencode(params)
def getToken(self):
frob = self.authInfo.get('frob')
rsp = self.get(method='rtm.auth.getToken', frob=frob)
self.authInfo.dataReceived('token', rsp.auth.token)
return rsp.auth.token
class RTMAPICategory:
"See the `API` structure and `RTM.__init__`"
def __init__(self, rtm, prefix, methods):
self.rtm = rtm
self.prefix = prefix
self.methods = methods
def __getattr__(self, attr):
if attr in self.methods:
rargs, oargs = self.methods[attr]
if self.prefix == 'tasksNotes':
aname = 'rtm.tasks.notes.%s' % attr
else:
aname = 'rtm.%s.%s' % (self.prefix, attr)
return lambda **params: self.callMethod(
aname, rargs, oargs, **params)
else:
raise AttributeError('No such attribute: %s' % attr)
def callMethod(self, aname, rargs, oargs, **params):
# Sanity checks
for requiredArg in rargs:
if requiredArg not in params:
raise TypeError(
'Required parameter (%s) missing' % requiredArg)
for param in params:
if param not in rargs + oargs:
Log.error('Invalid parameter (%s)' % param)
return self.rtm.get(method=aname,
auth_token=self.rtm.authInfo.get('token'),
**params)
# Utility functions
def sortedItems(dictionary):
"Return a list of (key, value) sorted based on keys"
keys = list(dictionary.keys())
keys.sort()
for key in keys:
yield key, dictionary[key]
def openURL(url, queryArgs=None):
if queryArgs:
url = url + '?' + urllib.parse.urlencode(queryArgs)
# LOG.debug("URL> %s", url)
return urllib.request.urlopen(url)
class dottedDict(object):
"""Make dictionary items accessible via the object-dot notation."""
def __init__(self, name, dictionary):
self._name = name
if type(dictionary) is dict:
for key, value in list(dictionary.items()):
if type(value) is dict:
value = dottedDict(key, value)
elif type(value) in (list, tuple) and key != 'tag':
value = [dottedDict('%s_%d' % (key, i), item)
for i, item in indexed(value)]
setattr(self, key, value)
else:
raise ValueError('not a dict: %s' % dictionary)
def __repr__(self):
children = [c for c in dir(self) if not c.startswith('_')]
return 'dotted <%s> : %s' % (
self._name,
', '.join(children))
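# A minimal sketch of what dottedDict provides (the payload is made up):
#
#   >>> rsp = dottedDict('ROOT', {'rsp': {'stat': 'ok', 'err': {'code': 98}}})
#   >>> rsp.rsp.stat
#   'ok'
#   >>> rsp.rsp.err.code
#   98
#
# This is what lets RTM.get expose parsed JSON as rsp.stat, rsp.err.msg, etc.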
def safeEval(string):
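    # Evaluates with empty globals/locals; this still trusts the response
    # text, and is only used as a fallback when no JSON library is available.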
return eval(string, {}, {})
def dottedJSON(json):
return dottedDict('ROOT', safeEval(json))
def indexed(seq):
index = 0
for item in seq:
yield index, item
index += 1
# API spec
API = {
'auth': {
'checkToken':
[('auth_token',), ()],
'getFrob':
[(), ()],
'getToken':
[('frob',), ()]
},
'contacts': {
'add': [('timeline', 'contact'), ()],
'delete': [('timeline', 'contact_id'), ()],
'getList': [(), ()]
},
'groups': {
'add': [('timeline', 'group'), ()],
'addContact': [('timeline', 'group_id', 'contact_id'), ()],
'delete': [('timeline', 'group_id'), ()],
'getList': [(), ()],
'removeContact': [('timeline', 'group_id', 'contact_id'), ()],
},
'lists': {
'add': [('timeline', 'name',), ('filter',)],
'archive': [('timeline', 'list_id'), ()],
'delete': [('timeline', 'list_id'), ()],
'getList': [(), ()],
'setDefaultList': [('timeline'), ('list_id')],
'setName': [('timeline', 'list_id', 'name'), ()],
'unarchive': [('timeline',), ('list_id',)]
},
'locations': {
'getList': [(), ()]
},
'reflection': {
'getMethodInfo': [('methodName',), ()],
'getMethods': [(), ()]
},
'settings': {
'getList': [(), ()]
},
'tasks': {
'add': [('timeline', 'name',), ('list_id', 'parse',)],
'addTags': [
('timeline', 'list_id', 'taskseries_id', 'task_id', 'tags'), (),
],
'complete': [('timeline', 'list_id', 'taskseries_id', 'task_id',), ()],
'delete': [('timeline', 'list_id', 'taskseries_id', 'task_id'), ()],
'getList': [(), ('list_id', 'filter', 'last_sync')],
'movePriority': [
('timeline', 'list_id', 'taskseries_id', 'task_id', 'direction'),
(),
],
'moveTo': [
('timeline', 'from_list_id', 'to_list_id',
'taskseries_id', 'task_id'),
(),
],
'postpone':
[('timeline', 'list_id', 'taskseries_id', 'task_id'),
()],
'removeTags':
[('timeline', 'list_id', 'taskseries_id', 'task_id', 'tags'),
()],
'setDueDate':
[('timeline', 'list_id', 'taskseries_id', 'task_id'),
('due', 'has_due_time', 'parse')],
'setEstimate':
[('timeline', 'list_id', 'taskseries_id', 'task_id'),
('estimate',)],
'setLocation':
[('timeline', 'list_id', 'taskseries_id', 'task_id'),
('location_id',)],
'setName':
[('timeline', 'list_id', 'taskseries_id', 'task_id', 'name'),
()],
'setPriority':
[('timeline', 'list_id', 'taskseries_id', 'task_id'),
('priority',)],
'setRecurrence':
[('timeline', 'list_id', 'taskseries_id', 'task_id'),
('repeat',)],
'setTags':
[('timeline', 'list_id', 'taskseries_id', 'task_id'),
('tags',)],
'setURL':
[('timeline', 'list_id', 'taskseries_id', 'task_id'),
('url',)],
'uncomplete':
[('timeline', 'list_id', 'taskseries_id', 'task_id'),
()],
},
'tasksNotes': {
'add': [
('timeline', 'list_id', 'taskseries_id', 'task_id',
'note_title', 'note_text'), ()],
'delete': [('timeline', 'note_id'), ()],
'edit': [('timeline', 'note_id', 'note_title', 'note_text'), ()]
},
'test': {
'echo': [(), ()],
'login': [(), ()]
},
'time': {
'convert': [
('to_timezone',), ('from_timezone', 'to_timezone', 'time')],
'parse': [('text',), ('timezone', 'dateformat')]
},
'timelines': {
'create': [(), ()]
},
'timezones': {
'getList': [(), ()]
},
'transactions': {
'undo': [('timeline', 'transaction_id'), ()]
},
}
def createRTM(apiKey, secret, token=None):
rtm = RTM(apiKey, secret, token)
# if token is None:
# print 'No token found'
# print 'Give me access here:', rtm.getAuthURL()
# raw_input('Press enter once you gave access')
# print 'Note down this token for future use:', rtm.getToken()
return rtm
def test(apiKey, secret, token=None):
rtm = createRTM(apiKey, secret, token)
rspTasks = rtm.tasks.getList(filter='dueWithin:"1 week of today"')
print([t.name for t in rspTasks.tasks.list.taskseries])
print(rspTasks.tasks.list.id)
rspLists = rtm.lists.getList()
# print rspLists.lists.list
print([(x.name, x.id) for x in rspLists.lists.list])
def set_log_level(level):
'''Sets the log level of the logger used by the module.
>>> import rtm
>>> import logging
>>> rtm.set_log_level(logging.INFO)
'''
# LOG.setLevel(level)
| sagarghuge/recurringtask | GTG/backends/rtm/rtm.py | Python | gpl-3.0 | 11,239 |
#!/usr/bin/python
import sys
from participantCollection import ParticipantCollection
names = sys.argv[1::]
participants = ParticipantCollection()
for name in names:
if participants.hasParticipantNamed(name):
participants.participantNamed(name).hasCheckedIn = True
print "just checked in " + name
else:
print "*** WARNING: " + name + " is not present in participants.txt"
participants.save()
| foobarbazblarg/stayclean | stayclean-2017-january/checkin.py | Python | mit | 427 |
#
#
# MMP (Multiscale Modeling Platform) - EU Project
# funded by FP7 under NMP-2013-1.4-1 call with Grant agreement no: 604279
#
# Copyright (C) 2014-2016
# Ralph Altenfeld (Access e.V., Germany)
#
# This script code is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
#
"""
MICRESS interface configuration file
This file provides necessary information about the MICRESS installation
which should be steered by the interface.
:note: In this example, MICRESS will not be run directly.
All result data is pre-calculated
"""
micressPath = "../../MICRESS_Bin"
micressExec = micressPath + "/starter.sh"
micressLicense = "@..."
thermocalcLicense = "..."
| mupif/mupif | obsolete/APIs/micress/micressConfig.py | Python | lgpl-3.0 | 1,359 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Wires.Switch import Switch
class GroundDisconnector(Switch):
"""A manually operated or motor operated mechanical switching device used for isolating a circuit or equipment from Ground.A manually operated or motor operated mechanical switching device used for isolating a circuit or equipment from Ground.
"""
def __init__(self, *args, **kw_args):
"""Initialises a new 'GroundDisconnector' instance.
"""
super(GroundDisconnector, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = []
_many_refs = []
| rwl/PyCIM | CIM15/IEC61970/Wires/GroundDisconnector.py | Python | mit | 1,730 |
# -*- coding: utf-8 -*-
from .base import SqlNode
from .utils import _setup_joins_for_fields
# TODO: add function(function()) feature.
class SqlFunction(SqlNode):
sql_template = '%(function)s(%(field)s)'
sql_function = None
args = []
def __init__(self, field, *args, **kwargs):
self.field = field
self.args = args
self.extern_params = kwargs
@property
def field_parts(self):
return self.field.split("__")
def as_sql(self, qn, queryset):
"""
Return the aggregate/annotation rendered as sql.
"""
_setup_joins_for_fields(self.field_parts, self, queryset)
params = {}
if self.sql_function is not None:
params['function'] = self.sql_function
if isinstance(self.field, basestring):
params['field'] = qn(self.field)
elif isinstance(self.field, (tuple, list)):
_tbl, _fld = self.field
params['field'] = "%s.%s" % (qn(_tbl), qn(_fld))
else:
raise ValueError("Invalid field value")
params.update(self.extern_params)
return self.sql_template % params, self.args
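# A minimal sketch of a concrete function built on SqlFunction (the subclass
# below is illustrative, not part of this package); rendered against a
# queryset it produces "LOWER(<quoted field>)" plus any positional args.
class Lower(SqlFunction):
    sql_function = 'LOWER'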
| cr8ivecodesmith/django-orm-extensions-save22 | django_orm/core/sql/functions.py | Python | bsd-3-clause | 1,172 |
from discord import VoiceChannel, PCMVolumeTransformer, FFmpegPCMAudio
from discord.ext import commands
from discord.ext.commands import Context
from Cogs.Utils.custom_bot import CustomBot
from Cogs.Utils.youtube_downloader import YTDL_Source
class Music_Commands(object):
def __init__(self, bot:CustomBot):
self.bot = bot
self.queue = {} # GuildID: List[str]
@commands.command()
async def join(self, ctx:Context, *, channel:VoiceChannel):
'''
Joins a voice channel
'''
if ctx.voice_client is not None:
return await ctx.voice_client.move_to(channel)
await channel.connect()
@commands.command()
async def play(self, ctx:Context, *, url:str):
'''
Streams from a URL (almost anything youtube_dl supports)
'''
# Works out if it's connected to a VC
if ctx.voice_client is None:
if ctx.author.voice.channel:
await ctx.author.voice.channel.connect()
else:
await ctx.send('I\'m not currently connected to a voice channel.')
return
# If it's playing something, stop it
if ctx.voice_client.is_playing():
ctx.voice_client.stop()
# Create a YTDL object
player = await YTDL_Source.from_url(url, loop=self.bot.loop)
# Play it
ctx.voice_client.play(player, after=lambda e: print('Player error: %s' % e) if e else None)
# Boop it out to the user
await ctx.send('Now playing: {}'.format(player.title))
await ctx.send(player.data)
@commands.command()
async def volume(self, ctx:Context, volume:int):
'''
Changes the player's volume
'''
# Determine if connected
if ctx.voice_client is None:
await ctx.send('I\'m not currently connected to a voice channel.')
return
# Change the volume of the player
        # Clamp to 0-100; PCMVolumeTransformer treats 1.0 as 100% volume
        volume = max(0, min(volume, 100))
        ctx.voice_client.source.volume = volume / 100
        await ctx.send('Changed volume to {}%'.format(volume))
@commands.command()
async def stop(self, ctx:Context):
'''
Stops and disconnects the bot from voice
'''
await ctx.voice_client.disconnect()
await ctx.send('Stopped, disconnected, and cleared queue.')
def setup(bot):
x = Music_Commands(bot)
bot.add_cog(x)
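# Usage sketch: with discord.py's extension system this cog is loaded via
# bot.load_extension('Cogs._Music_Commands'), which calls setup(bot) above.
# (The dotted extension path is an assumption based on this file's location.)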
| 4Kaylum/Spar.cli | Cogs/_Music_Commands.py | Python | gpl-3.0 | 2,450 |
#### this is the server class as well as the starter script for MusicMashup.
# server is build upon cherrypy
import cherrypy
import os
# the actual program logic is handled by the artist class
from MusicMashupArtist import MusicMashupArtist
# template engine
from mako.template import Template
from mako.lookup import TemplateLookup
# used for history / breadcrumbs generation
from urllib import quote_plus
# to make band names pretty in breadcrumbs
from titlecase import titlecase
class MusicMashupServer(object):
def __init__(self):
pass
@cherrypy.expose # tell cherrypy to map this function on "/" URL
def index(self, query="", soloartist=0):
# initialize mako (template engine)
lookup = TemplateLookup(directories=['html'])
# show search page if no query has been made
if query == "":
print "[~] No query given, serving search page"
tmpl = lookup.get_template("search.htm")
return tmpl.render()
# query is present. could be a dbpedia URL or text search, artist class will deal with that
else:
### create musicmashup object based on query. this handles everything but the breadcrumbs.
self.artist = MusicMashupArtist(query)
### save things to breadcrumbs and prepare them to be shown on the page
# add new query to breadcrumbs list. create as list if not existing yet
if not "history" in cherrypy.session:
cherrypy.session['history'] = []
# new search -> new breadcrumbs
if not query[:4] == "http": # if it's not a URL, it must be text search
cherrypy.session['history'] = [] # reset history list
# also, if name rather than query, convert to titlecase
query = titlecase(query)
# append newest query to list, template will determine if it's a URI or name
if not (len(cherrypy.session['history']) > 0 and cherrypy.session['history'][-1] == query):
cherrypy.session['history'].append(query)
# load mako templates
tmpl = lookup.get_template("main.htm")
# add whole Artist object and history array from sessions
# all queries etc. will be triggered from within the template.
# basically, all information is fetched "on demand" by the artist object
return tmpl.render(artist=self.artist, history=cherrypy.session['history'])
# End of class
#### Startup script
if __name__ == '__main__':
print ("[~] Initializing...")
# configure cherrypy entity, especially turn on session handling and define ./static
# as the folder to serve images, css etc.
# bind to all IPv4 interfaces
cherrypy.config.update({'server.socket_host': '0.0.0.0'})
conf = {
'/': {
'tools.sessions.on': True,
'tools.staticdir.root': os.path.abspath(os.getcwd())
},
'/static': {
'tools.staticdir.on': True,
'tools.staticdir.dir': './static'
}
}
# start the server based on the functions of the MM-Server class
    cherrypy.quickstart(MusicMashupServer(), '/', conf)
| kaozente/MusicMashup | MusicMashupServer.py | Python | mit | 2945 |
#!/usr/bin/env python3
import time
import sys
import os
import math
import argparse
import matplotlib.pyplot as plt
# if PAPARAZZI_HOME not set, then assume the tree containing this
# file is a reasonable substitute
PPRZ_HOME = os.getenv("PAPARAZZI_HOME", os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../')))
sys.path.append(PPRZ_HOME + "/sw/ground_segment/python/natnet3.x")
from NatNetClient import NatNetClient
# This is a callback function that gets connected to the NatNet client. It is called once per rigid body per frame
def receiveRigidBodyFrame(id, position, rotation):
# print( "Received frame for rigid body", id )
global pos_x, pos_y, pos_z
global track_id
if track_id and id != track_id:
return
pos_x = position[0]
pos_y = position[1]
pos_z = position[2]
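# Note (illustrative): NatNetClient invokes this listener once per rigid body
# per frame; `position` is an (x, y, z) tuple in the motion-capture frame, and
# the `track_id` filter lets several bodies stream while only one is plotted.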
def main(args):
global track_id
track_id = args.id
global pos_x, pos_y, pos_z
pos_x, pos_y, pos_z = 0.0, 0.0, 0.0
fig = plt.figure()
plt.axis([-6, 6, -6, 6])
# This will create a new NatNet client
streamingClient = NatNetClient(
server=args.server,
multicast=args.multicast,
commandPort=args.commandPort,
dataPort=args.dataPort,
rigidBodyListListener=receiveRigidBodyFrame)
# Start up the streaming client now that the callbacks are set up.
# This will run perpetually, and operate on a separate thread.
streamingClient.run()
time.sleep(2)
print('Start tracking')
if args.outputfile:
file = open(args.outputfile, 'w')
file.write('timestamp, x, y, z\n')
old_z = pos_z
old_x = pos_x
distance = 0
start_time = time.time()
pre_time = time.time()
while plt.fignum_exists(fig.number):
if args.outputfile:
data = '{}, {}, {}, {}\n'.format(int((time.time() - start_time) * 1000), pos_x, pos_y, pos_z)
file.write(data)
h = math.hypot(pos_z - old_z, pos_x - old_x)
if h > 0.20:
distance += h
old_z = pos_z
old_x = pos_x
if time.time() - pre_time > 0.5:
print("distance:%3.4f m; time_step:%d" % (distance, int((time.time() - start_time) * 2)))
pre_time = time.time()
plt.plot(pos_z, pos_x, 'ro')
plt.draw()
plt.pause(0.001)
time.sleep(0.01)
streamingClient.stop()
if args.outputfile:
file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--server', default="169.254.201.120")
parser.add_argument('--multicast', default="239.255.42.99")
parser.add_argument('--commandPort', type=int, default=1510)
parser.add_argument('--dataPort', type=int, default=1511)
parser.add_argument('--id', type=int, default=None)
parser.add_argument('--outputfile', type=str, default=None)
args = parser.parse_args()
main(args)
| HWal/paparazzi | sw/tools/opti_dist/dist.py | Python | gpl-2.0 | 3,072 |
import unittest
import unittest.mock as mock
from tower_of_hanoi import Disk, Game, Peg
class DiskTestCase(unittest.TestCase):
def test____eq____when_self_equals_other__returns_true(self):
self.assertEqual(Disk(1), Disk(1))
def test____eq____when_self_size_not_equals_other__returns_false(self):
self.assertNotEqual(Disk(1), Disk(2))
def test____lt____when_self_equals_other__returns_false(self):
self.assertFalse(Disk(1) < Disk(1))
def test____lt____when_self_greater_than_other__returns_false(self):
self.assertFalse(Disk(2) < Disk(1))
def test____lt____when_self_less_than_other__returns_true(self):
self.assertTrue(Disk(1) < Disk(2))
class PegTestCase(unittest.TestCase):
def _create_peg(self, name=None, disks=[]):
        return Peg(name if name is not None else self._name, disks)
def setUp(self):
self._disk_1 = Disk(1)
self._disk_2 = Disk(2)
self._disk_3 = Disk(3)
self._name = 'name'
def test____eq____when_self_equals_other__returns_true(self):
self.assertEqual(Peg(self._name, [self._disk_1]), Peg(self._name, [self._disk_1]))
def test____eq____when_self_disks_not_equals_other__returns_false(self):
self.assertNotEqual(Peg(self._name, [self._disk_1]), Peg(self._name, [self._disk_2]))
def test____eq____when_self_name_not_equals_other__returns_false(self):
self.assertNotEqual(Peg(self._name, [self._disk_1]), Peg('other-name', [self._disk_1]))
def test__disks__returns_copy(self):
peg = self._create_peg()
peg.disks().append(self._disk_1)
self.assertEqual([], peg.disks())
def test__disks__returns_in_order_from_bottom_to_top(self):
peg = self._create_peg(disks=[self._disk_3, self._disk_2, self._disk_1])
self.assertEqual([self._disk_3, self._disk_2, self._disk_1], peg.disks())
def test__is_empty__when_empty__returns_true(self):
peg = self._create_peg()
self.assertTrue(peg.is_empty())
def test__is_empty__when_not_empty__returns_false(self):
peg = self._create_peg(disks=[self._disk_1])
self.assertFalse(peg.is_empty())
def test__pop__when_empty__raises_exception(self):
peg = self._create_peg()
with self.assertRaises(Exception):
peg.pop()
def test__pop__when_not_empty__removes_top_disk(self):
peg = self._create_peg(disks=[self._disk_2, self._disk_1])
popped_disk = peg.pop()
self.assertEqual([self._disk_2], peg.disks())
self.assertEqual(self._disk_1, popped_disk)
def test__push__when_empty__adds_disk(self):
peg = self._create_peg()
peg.push(self._disk_1)
self.assertEqual([self._disk_1], peg.disks())
def test__push__when_disk_smaller_than_top_disk__adds_disk_to_top(self):
peg = self._create_peg(disks=[self._disk_2])
peg.push(self._disk_1)
self.assertEqual([self._disk_2, self._disk_1], peg.disks())
def test__push__when_disk_same_as_top_disk__raises_exception(self):
peg = self._create_peg(disks=[self._disk_1])
with self.assertRaises(Exception):
peg.push(self._disk_1)
def test__push__when_disk_larger_than_top_disk__raises_exception(self):
peg = self._create_peg(disks=[self._disk_1])
with self.assertRaises(Exception):
peg.push(self._disk_2)
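    def test__push_pop__roundtrip__restores_original_state(self):
        # Additional sanity check (a sketch): pushing then popping a smaller
        # disk leaves the peg exactly as it started.
        peg = self._create_peg(disks=[self._disk_2])
        peg.push(self._disk_1)
        self.assertEqual(self._disk_1, peg.pop())
        self.assertEqual([self._disk_2], peg.disks())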
class GameTestCase(unittest.TestCase):
class _MoveSpy(mock.Mock):
'''
A test spy that can be passed as the `callback` parameter of the
`Game.move` method.
Because `Peg`s are mutable, we must copy the peg arguments during each
call instead of storing them directly. Otherwise, all calls will
reflect the final state of the pegs.
'''
def _mock_call(self, *args, **kwargs):
import copy
args_copy = copy.deepcopy(args)
kwargs_copy = copy.deepcopy(kwargs)
return super()._mock_call(*args_copy, **kwargs_copy)
def _create_peg_a(self, disks):
return Peg('a', disks)
def _create_peg_b(self, disks=[]):
return Peg('b', disks)
def _create_peg_c(self, disks=[]):
return Peg('c', disks)
def setUp(self):
self._disk_1 = Disk(1)
self._disk_2 = Disk(2)
self._disk_3 = Disk(3)
self._disk_4 = Disk(4)
self._peg_b = self._create_peg_b()
self._peg_c = self._create_peg_c()
self._game = Game()
def test__create_peg__returns_peg_with_specified_name(self):
name = 'name'
peg = self._game.create_peg(name)
self.assertEqual(name, peg.name())
def test__create_peg__when_disk_count_is_0__returns_empty_peg(self):
peg = self._game.create_peg('name', 0)
self.assertEqual([], peg.disks())
def test__create_peg__when_disk_count_is_1__returns_peg_with_1_disk(self):
peg = self._game.create_peg('name', 1)
self.assertEqual([self._disk_1], peg.disks())
def test__create_peg__when_disk_count_is_3__returns_peg_with_3_disks_in_ascending_order_from_top(self):
peg = self._game.create_peg('name', 3)
self.assertEqual([self._disk_3, self._disk_2, self._disk_1], peg.disks())
def test__move__when_disk_count_is_1__invokes_callback_after_each_move(self):
move_spy = GameTestCase._MoveSpy()
peg_a = self._create_peg_a([self._disk_1])
self._game.move(1, peg_a, self._peg_c, self._peg_b, move_spy)
expected_move_spy_call_args_list = [
mock.call([
self._create_peg_a([]),
self._create_peg_c([self._disk_1]),
self._create_peg_b([])
])
]
self.assertEqual(expected_move_spy_call_args_list, move_spy.call_args_list)
def test__move__when_disk_count_is_1__moves_disks_from_peg_a_to_peg_c(self):
peg_a = self._create_peg_a([self._disk_1])
self._game.move(1, peg_a, self._peg_c, self._peg_b)
self.assertEqual(self._create_peg_a([]), peg_a)
self.assertEqual(self._create_peg_b([]), self._peg_b)
self.assertEqual(self._create_peg_c([self._disk_1]), self._peg_c)
def test__move__when_disk_count_is_2__invokes_callback_after_each_move(self):
move_spy = GameTestCase._MoveSpy()
peg_a = self._create_peg_a([self._disk_2, self._disk_1])
self._game.move(2, peg_a, self._peg_c, self._peg_b, move_spy)
expected_move_spy_call_args_list = [
mock.call([
self._create_peg_a([self._disk_2]),
self._create_peg_b([self._disk_1]),
self._create_peg_c([])
]),
mock.call([
self._create_peg_a([]),
self._create_peg_c([self._disk_2]),
self._create_peg_b([self._disk_1])
]),
mock.call([
self._create_peg_b([]),
self._create_peg_c([self._disk_2, self._disk_1]),
self._create_peg_a([])
])
]
self.assertEqual(expected_move_spy_call_args_list, move_spy.call_args_list)
def test__move__when_disk_count_is_2__moves_disks_from_peg_a_to_peg_c(self):
peg_a = self._create_peg_a([self._disk_2, self._disk_1])
self._game.move(2, peg_a, self._peg_c, self._peg_b)
self.assertEqual(self._create_peg_a([]), peg_a)
self.assertEqual(self._create_peg_b([]), self._peg_b)
self.assertEqual(self._create_peg_c([self._disk_2, self._disk_1]), self._peg_c)
def test__move__when_disk_count_is_3__moves_disks_from_peg_a_to_peg_c(self):
peg_a = self._create_peg_a([self._disk_3, self._disk_2, self._disk_1])
self._game.move(3, peg_a, self._peg_c, self._peg_b)
self.assertEqual(self._create_peg_a([]), peg_a)
self.assertEqual(self._create_peg_b([]), self._peg_b)
self.assertEqual(self._create_peg_c([self._disk_3, self._disk_2, self._disk_1]), self._peg_c)
def test__move__when_disk_count_is_4__moves_disks_from_peg_a_to_peg_c(self):
peg_a = self._create_peg_a([self._disk_4, self._disk_3, self._disk_2, self._disk_1])
self._game.move(4, peg_a, self._peg_c, self._peg_b)
self.assertEqual(self._create_peg_a([]), peg_a)
self.assertEqual(self._create_peg_b([]), self._peg_b)
self.assertEqual(self._create_peg_c([self._disk_4, self._disk_3, self._disk_2, self._disk_1]), self._peg_c)
def test__move__when_disk_count_exceeds_source_peg_disk_count__raises_exception(self):
peg_a = self._create_peg_a([self._disk_1])
with self.assertRaises(Exception):
self._game.move(2, peg_a, self._peg_c, self._peg_b)
if __name__ == '__main__':
unittest.main()
| ssoloff/tower-of-hanoi | imperative/test/test_tower_of_hanoi.py | Python | gpl-3.0 | 8,787 |
"""
https://en.wikipedia.org/wiki/Square_root_of_a_matrix
B is the sqrt of a matrix A if B*B = A
"""
import numpy as np
from scipy.linalg import sqrtm
from scipy.stats import special_ortho_group
def denman_beaver(A, n=50):
Y = A
Z = np.eye(len(A))
for i in range(n):
Yn = 0.5*(Y + np.linalg.inv(Z))
Zn = 0.5*(Z + np.linalg.inv(Y))
Y = Yn
Z = Zn
return (Y, Z)
def babylonian(A, n=50):
X = np.eye(len(A))
for i in range(n):
X = 0.5*(X + np.dot(A, np.linalg.inv(X)))
return X
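# Illustrative sanity check (a sketch): for a well-conditioned SPD matrix the
# Babylonian iteration converges so that X*X is approximately A; the matrix
# below is an arbitrary example chosen for demonstration.
def demo_babylonian():
    A = np.diag([4.0, 9.0])
    X = babylonian(A)
    assert np.allclose(np.dot(X, X), A)  # X converges to diag(2, 3)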
def gen_random_matrix(n):
return np.random.rand(n, n)
def gen_rotation_matrix(n):
return special_ortho_group.rvs(n)*np.random.randint(-100, 101)
def gen_symmetric_matrix(n):
A = np.random.randint(-10, 11, size=(n, n))
A = 0.5*(A + A.T)
return A
def test(title, gen_matrix, size, iters):
print("Testing {} matrix".format(title))
for i in range(1, size):
for j in range(iters):
try:
A = gen_matrix(i)
d = np.linalg.det(A)
Y, _ = denman_beaver(A)
X = babylonian(A)
Z = sqrtm(A)
print("{}x{} matrix (det {})".format(i, i, d))
print(A)
print("Denman Beaver")
print(np.dot(Y, Y))
print("Babylonian")
print(np.dot(X, X))
print("Scipy")
print(np.dot(Z, Z))
print()
except:
pass
# iteration methods above tend to fail on random and symmetric matrices
test("random", gen_random_matrix, 5, 10)
test("symmetric", gen_symmetric_matrix, 5, 10)
# for rotation matrices, the iteration methods work
test("rotation", gen_rotation_matrix, 5, 10)
| qeedquan/misc_utilities | math/matrix-sqrt.py | Python | mit | 1,781 |
import sys
def dictContains(D, key):
if sys.version_info[0] == 2:
return D.has_key(key)
elif sys.version_info[0] == 3:
return key in D
else:
raise Exception("No support for self.__dictContains for python major " +
"version: {}".format(sys.version_info[0]))
| Quva/sparkplug | sparkplug/helpers/helpers.py | Python | apache-2.0 | 339 |
"""Support for OpenTherm Gateway sensors."""
import logging
from homeassistant.components.sensor import ENTITY_ID_FORMAT
from homeassistant.const import DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from . import DATA_GW_VARS, DATA_OPENTHERM_GW, SIGNAL_OPENTHERM_GW_UPDATE
_LOGGER = logging.getLogger(__name__)
UNIT_BAR = 'bar'
UNIT_HOUR = 'h'
UNIT_KW = 'kW'
UNIT_L_MIN = 'L/min'
UNIT_PERCENT = '%'
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the OpenTherm Gateway sensors."""
if discovery_info is None:
return
gw_vars = hass.data[DATA_OPENTHERM_GW][DATA_GW_VARS]
sensor_info = {
# [device_class, unit, friendly_name]
gw_vars.DATA_CONTROL_SETPOINT: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, "Control Setpoint"],
gw_vars.DATA_MASTER_MEMBERID: [None, None, "Thermostat Member ID"],
gw_vars.DATA_SLAVE_MEMBERID: [None, None, "Boiler Member ID"],
gw_vars.DATA_SLAVE_OEM_FAULT: [None, None, "Boiler OEM Fault Code"],
gw_vars.DATA_COOLING_CONTROL: [
None, UNIT_PERCENT, "Cooling Control Signal"],
gw_vars.DATA_CONTROL_SETPOINT_2: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, "Control Setpoint 2"],
gw_vars.DATA_ROOM_SETPOINT_OVRD: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, "Room Setpoint Override"],
gw_vars.DATA_SLAVE_MAX_RELATIVE_MOD: [
None, UNIT_PERCENT, "Boiler Maximum Relative Modulation"],
gw_vars.DATA_SLAVE_MAX_CAPACITY: [
None, UNIT_KW, "Boiler Maximum Capacity"],
gw_vars.DATA_SLAVE_MIN_MOD_LEVEL: [
None, UNIT_PERCENT, "Boiler Minimum Modulation Level"],
gw_vars.DATA_ROOM_SETPOINT: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, "Room Setpoint"],
gw_vars.DATA_REL_MOD_LEVEL: [
None, UNIT_PERCENT, "Relative Modulation Level"],
gw_vars.DATA_CH_WATER_PRESS: [
None, UNIT_BAR, "Central Heating Water Pressure"],
gw_vars.DATA_DHW_FLOW_RATE: [None, UNIT_L_MIN, "Hot Water Flow Rate"],
gw_vars.DATA_ROOM_SETPOINT_2: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, "Room Setpoint 2"],
gw_vars.DATA_ROOM_TEMP: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, "Room Temperature"],
gw_vars.DATA_CH_WATER_TEMP: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS,
"Central Heating Water Temperature"],
gw_vars.DATA_DHW_TEMP: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, "Hot Water Temperature"],
gw_vars.DATA_OUTSIDE_TEMP: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, "Outside Temperature"],
gw_vars.DATA_RETURN_WATER_TEMP: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS,
"Return Water Temperature"],
gw_vars.DATA_SOLAR_STORAGE_TEMP: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS,
"Solar Storage Temperature"],
gw_vars.DATA_SOLAR_COLL_TEMP: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS,
"Solar Collector Temperature"],
gw_vars.DATA_CH_WATER_TEMP_2: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS,
"Central Heating 2 Water Temperature"],
gw_vars.DATA_DHW_TEMP_2: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, "Hot Water 2 Temperature"],
gw_vars.DATA_EXHAUST_TEMP: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, "Exhaust Temperature"],
gw_vars.DATA_SLAVE_DHW_MAX_SETP: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS,
"Hot Water Maximum Setpoint"],
gw_vars.DATA_SLAVE_DHW_MIN_SETP: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS,
"Hot Water Minimum Setpoint"],
gw_vars.DATA_SLAVE_CH_MAX_SETP: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS,
"Boiler Maximum Central Heating Setpoint"],
gw_vars.DATA_SLAVE_CH_MIN_SETP: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS,
"Boiler Minimum Central Heating Setpoint"],
gw_vars.DATA_DHW_SETPOINT: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, "Hot Water Setpoint"],
gw_vars.DATA_MAX_CH_SETPOINT: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS,
"Maximum Central Heating Setpoint"],
gw_vars.DATA_OEM_DIAG: [None, None, "OEM Diagnostic Code"],
gw_vars.DATA_TOTAL_BURNER_STARTS: [
None, None, "Total Burner Starts"],
gw_vars.DATA_CH_PUMP_STARTS: [
None, None, "Central Heating Pump Starts"],
gw_vars.DATA_DHW_PUMP_STARTS: [None, None, "Hot Water Pump Starts"],
gw_vars.DATA_DHW_BURNER_STARTS: [
None, None, "Hot Water Burner Starts"],
gw_vars.DATA_TOTAL_BURNER_HOURS: [
None, UNIT_HOUR, "Total Burner Hours"],
gw_vars.DATA_CH_PUMP_HOURS: [
None, UNIT_HOUR, "Central Heating Pump Hours"],
gw_vars.DATA_DHW_PUMP_HOURS: [None, UNIT_HOUR, "Hot Water Pump Hours"],
gw_vars.DATA_DHW_BURNER_HOURS: [
None, UNIT_HOUR, "Hot Water Burner Hours"],
gw_vars.DATA_MASTER_OT_VERSION: [
None, None, "Thermostat OpenTherm Version"],
gw_vars.DATA_SLAVE_OT_VERSION: [
None, None, "Boiler OpenTherm Version"],
gw_vars.DATA_MASTER_PRODUCT_TYPE: [
None, None, "Thermostat Product Type"],
gw_vars.DATA_MASTER_PRODUCT_VERSION: [
None, None, "Thermostat Product Version"],
gw_vars.DATA_SLAVE_PRODUCT_TYPE: [None, None, "Boiler Product Type"],
gw_vars.DATA_SLAVE_PRODUCT_VERSION: [
None, None, "Boiler Product Version"],
gw_vars.OTGW_MODE: [None, None, "Gateway/Monitor Mode"],
gw_vars.OTGW_DHW_OVRD: [None, None, "Gateway Hot Water Override Mode"],
gw_vars.OTGW_ABOUT: [None, None, "Gateway Firmware Version"],
gw_vars.OTGW_BUILD: [None, None, "Gateway Firmware Build"],
gw_vars.OTGW_CLOCKMHZ: [None, None, "Gateway Clock Speed"],
gw_vars.OTGW_LED_A: [None, None, "Gateway LED A Mode"],
gw_vars.OTGW_LED_B: [None, None, "Gateway LED B Mode"],
gw_vars.OTGW_LED_C: [None, None, "Gateway LED C Mode"],
gw_vars.OTGW_LED_D: [None, None, "Gateway LED D Mode"],
gw_vars.OTGW_LED_E: [None, None, "Gateway LED E Mode"],
gw_vars.OTGW_LED_F: [None, None, "Gateway LED F Mode"],
gw_vars.OTGW_GPIO_A: [None, None, "Gateway GPIO A Mode"],
gw_vars.OTGW_GPIO_B: [None, None, "Gateway GPIO B Mode"],
gw_vars.OTGW_SB_TEMP: [
DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS,
"Gateway Setback Temperature"],
gw_vars.OTGW_SETP_OVRD_MODE: [
None, None, "Gateway Room Setpoint Override Mode"],
gw_vars.OTGW_SMART_PWR: [None, None, "Gateway Smart Power Mode"],
gw_vars.OTGW_THRM_DETECT: [None, None, "Gateway Thermostat Detection"],
gw_vars.OTGW_VREF: [None, None, "Gateway Reference Voltage Setting"],
}
sensors = []
for var in discovery_info:
device_class = sensor_info[var][0]
unit = sensor_info[var][1]
friendly_name = sensor_info[var][2]
entity_id = async_generate_entity_id(ENTITY_ID_FORMAT, var, hass=hass)
sensors.append(
OpenThermSensor(entity_id, var, device_class, unit, friendly_name))
async_add_entities(sensors)
class OpenThermSensor(Entity):
"""Representation of an OpenTherm Gateway sensor."""
def __init__(self, entity_id, var, device_class, unit, friendly_name):
"""Initialize the OpenTherm Gateway sensor."""
self.entity_id = entity_id
self._var = var
self._value = None
self._device_class = device_class
self._unit = unit
self._friendly_name = friendly_name
async def async_added_to_hass(self):
"""Subscribe to updates from the component."""
_LOGGER.debug("Added OpenTherm Gateway sensor %s", self._friendly_name)
async_dispatcher_connect(self.hass, SIGNAL_OPENTHERM_GW_UPDATE,
self.receive_report)
async def receive_report(self, status):
"""Handle status updates from the component."""
value = status.get(self._var)
if isinstance(value, float):
value = '{:2.1f}'.format(value)
self._value = value
self.async_schedule_update_ha_state()
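    # Formatting note (illustrative): receive_report() renders float readings
    # with one decimal place, e.g. '{:2.1f}'.format(21.456) -> '21.5'.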
@property
def name(self):
"""Return the friendly name of the sensor."""
return self._friendly_name
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def state(self):
"""Return the state of the device."""
return self._value
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
@property
def should_poll(self):
"""Return False because entity pushes its state."""
return False
| jnewland/home-assistant | homeassistant/components/opentherm_gw/sensor.py | Python | apache-2.0 | 9,134 |
# This file is part of browser, and contains the default variables.
#
# Copyright (C) 2009-2010 Josiah Gordon <[email protected]>
#
# browser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Default global variables.
"""
import os
# Application name.
APP_NAME = 'webbrowser'
# Set the global main interface name.
MAIN_INTERFACE_NAME = "com.browser.main%d" % os.getpid()
# Define the interface used when connecting to a tab over dbus.
PLUG_INTERFACE_NAME = "com.browser.plug%d" % os.getpid()
| zepto/webbrowser | webbrowser/defaults.py | Python | gpl-3.0 | 1,073 |
import sys
from itertools import izip
from heinzel.core import connection
from heinzel.core import signals
from heinzel.core import relations
from heinzel.core.managers import Manager
from heinzel.core.fields import *
from heinzel.core.queries import BaseQuerySet
from heinzel.core.sql.dml import (SelectQuery, InsertQuery, DeleteQuery,
UpdateQuery, Q)
from heinzel.core.descriptors import (PrimaryKeyDescriptor,
DataFieldDescriptor, RelationDescriptor)
from heinzel.core.exceptions import (SQLSyntaxError, ValidationError,
DatabaseSanityError)
from heinzel import settings
# All models that need database access need to be put inside this list.
# E.g. `import models; models.register([YourModel1, YourModel2])`.
#? TODO: Wrap in class
registry = []
def setup_relations():
for m in registry:
for r in list(relations.registry.get_relations_for_model(m)):
other_m = r._get_other_model(m)
if not other_m in registry:
raise ValidationError(
"Model has not been registered with models.registry! "
"Model: %s. Relation: %s." %(other_m, r)
)
# Relation to self
if other_m is m:
# after this if, the next ident will be the reverse_identifier
# of the relation if other_m is m
ident = r.identifier
desc = RelationDescriptor(r, ident)
setattr(m, ident, desc)
m._relations.append(r)
ident = r._get_identifier_for_model(m)
desc = RelationDescriptor(r, ident)
setattr(m, ident, desc)
def register(models):
for m in models:
if m not in registry:
registry.append(m)
setup_relations()
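# A minimal usage sketch (model and field names below are illustrative and
# assume matching Field classes exist in heinzel.core.fields):
#   class Author(Model):
#       name = CharField()
#   class Book(Model):
#       title = CharField()
#       author = ForeignKeyField(Author, related_name="books")
#   register([Author, Book])  # wires up relations via setup_relations()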
class ModelBase(type):
def __new__(cls, name, bases, attrs):
new_class = super(ModelBase, cls).__new__(cls, name, bases, attrs)
if name == "Model":
#print "bailing out early..."
return new_class
m = Manager()
m.contribute_to_class(new_class, "objects")
        # Don't forget to call ``setup_relations`` after all Models have been constructed
new_class._relations = relations.RelationRegistry()
new_class.pk = PrimaryKeyDescriptor()
#register fields
new_class._fields = dict([(k, v) for k, v in new_class.__dict__.items() if isinstance(v, Field)])
try:
pk = new_class.get_primary_key()
except:
new_class._fields["id"] = IntegerField(primary_key=True)
new_class._primary_key = "id"
if new_class._primary_key != "id":
new_class._fields["id"] = IntegerField(primary_key=False, auto_increment=True)
# Register Relations, set Descriptors for data fields (non-RelationFields) and
# set related_names on those fields
for fname, f in new_class._fields.items():
if isinstance(f, RelationField):
f.set_related_name(new_class)
if isinstance(f, ForeignKeyField):
f.column_name = f.column_name or fname + "_id"
mode = relations.FK
elif isinstance(f, ManyToManyField):
mode = relations.M2M
elif isinstance(f, OneToOneField):
mode = relations.O2O
relations.registry.add_new(new_class, f.related_model, fname, f.related_name, mode)
else:
f.column_name = f.column_name or fname
# overwrite fields on class with datafield descriptors for the fields.
setattr(new_class, fname, DataFieldDescriptor(fname))
f.name = fname
# Has to be set after the other Fields got their column_name and name
new_class._fields["pk"] = new_class._fields[new_class._primary_key]
return new_class
class Model(object):
__metaclass__ = ModelBase
def __init__(self, **kwargs):
signals.fire("model-pre-init", instance=self, kwargs=kwargs)
for fname, f in self.fields().items():
if f.attrs.get("initial") is not None:
if callable(f.attrs["initial"]):
value = f.attrs["initial"]()
else:
value = f.attrs["initial"]
setattr(self, fname, value)
# Set any Field and non-Field parameters on this instance
for name, value in kwargs.items():
if name in self._relations.get_identifiers():
raise TypeError("'%s' refers to a RelationManager. "\
"RelationManagers can't be set on instantiating, "\
"because at that point, the instance has not been "\
"created and so has no id with which to link it to %s."\
% (name, value))
setattr(self, name, value)
signals.fire("model-post-init", instance=self, kwargs=kwargs)
def __new__(cls, **kwargs):
return super(Model, cls).__new__(cls)
def __setattr__(self, name, value):
field = self.get_field_by_column_name(name)
if field:
name = field.name
super(Model, self).__setattr__(name, value)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
field = self.get_field_by_column_name(name)
if field:
o = getattr(self, field.name)
if isinstance(o, Model):
return o.pk
return o
raise AttributeError("%s doesn't have attribute '%s'." %(self, name))
def __str__(self):
if getattr(self, "__unicode__", None):
return unicode(self).encode(settings.DEFAULT_ENCODING)
return repr(self)
def __unicode__(self):
return u"<%s instance at 0x%x>" %(self.__class__.__name__,
id(self))
def __eq__(self, other):
if self.pk is None or other.pk is None:
# If any instance has no pk value, compare by identity.
return self is other
return type(self) == type(other) and self.pk == other.pk
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def fields(cls):
return cls._fields
@classmethod
def many_related(cls):
return dict([(k, v) for k, v in cls.fields().items()
if isinstance(v, (ManyToManyField, OneToOneField))]
)
@classmethod
def non_many_related(cls):
return dict([(k, v) for k, v in cls.fields().items()
if k not in cls.many_related()]
)
@classmethod
def related(cls):
return dict(
[(k, v) for k, v in cls.fields().items()
if isinstance(v, RelationField)]
)
@classmethod
def non_related(cls):
return dict(
[(k, v) for k, v in cls.fields().items()
if k not in cls.related()]
)
@classmethod
def foreignkeys(cls):
return dict([(k, v) for k, v in cls.fields().items()
if isinstance(v, ForeignKeyField)]
)
@classmethod
def get_column_names(cls):
"""
The column names on the model's table. Since through the 'pk'
Field alias there are duplicate entries, make it a set.
"""
ret = set([v.column_name for v in cls.non_many_related().values()])
return ret
def get_column_values(self):
"""The values on this instance to be inserted (or updated) in the
database."""
return self.get_column_names_values().values()
def get_column_names_values(self):
"""Return a dict of all non many related fields' column_names as keys
and the instance's values on these fields as values."""
return dict([(k, getattr(self, k)) for k in self.get_column_names()])
def get_field_names_values(self):
d = {}
for k in self.non_many_related():
attr = getattr(self, k)
if isinstance(attr, Model):
attr = attr.pk
if attr is not None:
d.update(k=attr)
return d
@classmethod
def get_field_by_column_name(cls, name):
for f in cls.fields().values():
if f.column_name == name:
return f
def get_unique_fields(self):
d = {}
for k, v in self.non_many_related().items():
if v.primary_key or v.unique:
d.update(k=v)
return d
def get_non_unique_fields(self):
uniques = self.get_unique_fields().values()
d = {}
for k, v in self.non_many_related().items():
if not v in uniques:
d.update(k=v)
return d
def get_non_related_field_names_values(self):
d = dict(
((k, getattr(self, k)) for k in self.non_related())
)
return d
def save(self):
"""When overriding this method, do not forget to call the Model.save()
method and to return a tuple of (instance, created), where created
means, it the instance was newly INSERTED into the database.
"""
signals.fire("model-pre-save", instance=self)
inst, created = self._save()
signals.fire("model-post-save", instance=self, created=created)
return inst, created
def _save(self):
"""Here we save an instance of a model to the database. If the instance
does not have a value for it's 'id' field, an entry will be
INSERTed. 'created' will then be set to True.
When an entry already exists for the given values and given any
unique constraints, try to update that entry with any non-unique
columns. In case of an update, 'created' will be False.
"""
if not self.id:
## If this particular instance has no id, try to insert it into the
## database.
iq = InsertQuery(self)
res = iq.execute()
self.id = res.lastrowid
iq.commit()
created = True
## Now that the row was successfully updated or it was
## determined that an update was not necessary, get that row's
## id and put it into self
## Get all fields that are not RelationFields and whose instance
## value is not None. RelationFields get created afterwards, so
        ## they can't be included in the search for the "id" of self at
## this point.
# searchvals = self.get_non_related_field_names_values()
## We need to look for the instance's id, so exclude those values
## from the where clause.
# searchvals.pop("pk")
# searchvals.pop(self.fields()["pk"].name)
# id_qs = type(self).objects.filter(**searchvals)
# results = id_qs.select("id")
## This should only be one...
# if len(results) != 1:
# print id_qs.query
# print results
# raise Exception("Model._save: Something wrong on selecting id")
# if not isinstance(results[0]["id"], int):
# raise Exception("Need exactly one result for 'id' query while "
# "updating model instance %s. Found %s:" % (self, len(self.id), results))
# self.id = results[0]["id"]
else:
## if it already has an id, try to update it.
try:
uq = UpdateQuery(self)
res = uq.execute()
uq.commit()
except:
raise
created = False
return self, created
def delete(self, force_delete=False):
signals.fire("model-pre-delete", instance=self)
instance, deleted = self._delete()
signals.fire("model-post-delete", instance=self, deleted=deleted)
return instance, deleted
def _delete(self):
if self.pk:
dq = DeleteQuery(type(self), {type(self).pk.column_name: self.pk})
dq.execute()
dq.commit()
deleted = True
else:
deleted = False
return self, deleted
def uncache(self):
signals.fire("model-do-not-cache", instance=self)
def reset(self, fieldname=None):
"""Roll back all changes to one or all of self's data fields. The
data fields will have the values as if they were freshly
        instantiated. To roll back all data fields, leave ``fieldname``
        set to 'None'.
"""
raise NotImplementedError
def undo(self, fieldname=None):
"""Undo the last change to a data field.
"""
raise NotImplementedError
def redo(self, fieldname=None):
"""Undo the last undo.
"""
raise NotImplementedError
@classmethod
def get_primary_key(cls):
"""
Returns the primary key of the model as a string.
If there is more than one field with the 'primary_key' property
set to 'True', an exception will be raised. Same holds if there
is no field with 'primary_key' set to 'True'.
"""
if not getattr(cls, "_primary_key", None):
r = []
for n, v in cls.fields().items():
if v.primary_key:
r.append(n)
if len(r) == 1:
cls._primary_key = r[0]
elif len(r) > 1:
raise Exception("Model %s has more than 1 primary key: %s" %(cls, l))
else:
raise Exception("Model %s has no primary key!" %cls)
return cls._primary_key
@classmethod
def tablename(cls):
"""
The tablename returned here is being used in the SQL generation.
"""
return getattr(cls, "_tablename", None) or cls.__name__.lower() + "s"
| kurvenschubser/pyheinzel | heinzel/core/models.py | Python | mit | 12,195 |
"""Fill empty added_at date
Revision ID: 71cacff30853
Revises: d26b4a2cc2ef
Create Date: 2016-04-04 16:43:45.040889
"""
# revision identifiers, used by Alembic.
revision = '71cacff30853'
down_revision = 'd26b4a2cc2ef'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from datetime import datetime
def upgrade():
when = datetime(2015, 1, 1)
op.execute('UPDATE customer SET added_at = \'2015-1-1\' WHERE added_at IS NULL')
op.execute('UPDATE hail SET added_at = \'2015-1-1\' WHERE added_at IS NULL')
op.execute('UPDATE "ADS" SET added_at = \'2015-1-1\' WHERE added_at IS NULL')
op.execute('UPDATE driver SET added_at = \'2015-1-1\' WHERE added_at IS NULL')
op.execute('UPDATE taxi SET added_at = \'2015-1-1\' WHERE added_at IS NULL')
op.execute('UPDATE vehicle_description SET added_at = \'2015-1-1\' WHERE added_at IS NULL')
def downgrade():
pass
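# Note: ``when`` above is currently unused; the literal '2015-1-1' is inlined
# in each statement instead. A parameterized variant (illustrative sketch,
# same effect) could reuse it:
#
#     for table in ('customer', 'hail', '"ADS"', 'driver', 'taxi',
#                   'vehicle_description'):
#         op.get_bind().execute(
#             sa.text('UPDATE %s SET added_at = :when'
#                     ' WHERE added_at IS NULL' % table),
#             when=when)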
| openmaraude/APITaxi | APITaxi_models2/migrations/versions/20160404_16:43:45_71cacff30853_fill_empty_added_at_date.py.py | Python | agpl-3.0 | 927 |
from .. import bar, manager, xcbq, window
import base
import xcb
from xcb.xproto import EventMask, SetMode
import atexit, struct
class Icon(window._Window):
_windowMask = EventMask.StructureNotify |\
EventMask.Exposure
def __init__(self, win, qtile, systray):
window._Window.__init__(self, win, qtile)
self.systray = systray
    def _configure_icon(self, pos):
        # Note: the original body called configure() on the `window` module
        # and read offset/icon_size from the icon itself; both actually live
        # on the systray widget. The intent appears to be:
        self.place(self.systray.offset + (self.systray.icon_size * pos), 0,
                   self.systray.icon_size, self.systray.icon_size, 0, None)
def handle_ConfigureNotify(self, event):
self.systray.draw()
return False
def handle_DestroyNotify(self, event):
wid = event.window
        del self.qtile.windowMap[wid]
        del self.systray.icons[wid]
self.systray.draw()
return False
handle_UnmapNotify = handle_DestroyNotify
class TrayWindow(window._Window):
_windowMask = EventMask.StructureNotify |\
EventMask.Exposure
def __init__(self, win, qtile, systray):
window._Window.__init__(self, win, qtile)
self.systray = systray
def handle_ClientMessage(self, event):
atoms = self.qtile.conn.atoms
opcode = xcb.xproto.ClientMessageData(event, 0, 20).data32[2]
data = xcb.xproto.ClientMessageData(event, 12, 20)
task = data.data32[2]
conn = self.qtile.conn.conn
parent = self.systray.bar.window.window
if opcode == atoms['_NET_SYSTEM_TRAY_OPCODE']:
w = xcbq.Window(self.qtile.conn, task)
icon = Icon(w, self.qtile, self.systray)
self.systray.icons[task] = icon
self.qtile.windowMap[task] = icon
# add icon window to the save-set, so it gets reparented
# to the root window when qtile dies
conn.core.ChangeSaveSet(SetMode.Insert, task)
conn.core.ReparentWindow(task, parent.wid, 0, 0)
conn.flush()
w.map()
return False
class Systray(base._Widget):
"""
A widget that manage system tray
"""
defaults = manager.Defaults(
('icon_size', 20, 'Icon width'),
('padding', 5, 'Padding between icons'),
)
def __init__(self, **config):
base._Widget.__init__(self, bar.CALCULATED, **config)
self.traywin = None
self.icons = {}
def click(self, x, y, button):
pass
def calculate_width(self):
width = len(self.icons) * (self.icon_size + self.padding) + self.padding
return width
def _configure(self, qtile, bar):
base._Widget._configure(self, qtile, bar)
self.qtile = qtile
self.bar = bar
atoms = qtile.conn.atoms
win = qtile.conn.create_window(-1, -1, 1, 1)
self.traywin = TrayWindow(win, self.qtile, self)
qtile.windowMap[win.wid] = self.traywin
qtile.conn.conn.core.SetSelectionOwner(
win.wid,
atoms['_NET_SYSTEM_TRAY_S0'],
xcb.CurrentTime
)
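        # Announce ourselves as the new _NET_SYSTEM_TRAY_S0 selection owner by
        # sending a MANAGER ClientMessage (event code 33, format 32) to the
        # root window, per the freedesktop systray spec. The packed fields
        # are: type, format, sequence, destination window, message atom, then
        # five 32-bit data words (timestamp, selection atom, owner window,
        # and two unused zeros).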
event = struct.pack('BBHII5I', 33, 32, 0, qtile.root.wid,
atoms['MANAGER'],
xcb.CurrentTime, atoms['_NET_SYSTEM_TRAY_S0'],
win.wid, 0, 0)
qtile.root.send_event(event, mask=EventMask.StructureNotify)
# cleanup before exit
atexit.register(self.cleanup)
def draw(self):
self.drawer.draw(self.offset, self.calculate_width())
for pos, icon in enumerate(self.icons.values()):
icon.place(
self.offset + (self.icon_size + self.padding)*pos + self.padding,
self.bar.height/2 - self.icon_size/2,
self.icon_size, self.icon_size,
0,
None
)
def cleanup(self):
atoms = self.qtile.conn.atoms
self.qtile.conn.conn.core.SetSelectionOwner(
0,
atoms['_NET_SYSTEM_TRAY_S0'],
xcb.CurrentTime,
)
self.traywin.hide()
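# Hedged configuration sketch (illustrative, not part of this module): the
# widget would typically be placed in a bar alongside others, e.g.
#
#     from libqtile import bar, widget
#     my_bar = bar.Bar([widget.Systray(icon_size=20, padding=5)], 24)
#
# Exact import paths and bar wiring for this era of qtile are assumptions.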
| andrelaszlo/qtile | libqtile/widget/systray.py | Python | mit | 4,106 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import copy
import sys
from pants.base.build_environment import pants_release
from pants.option.arg_splitter import (GLOBAL_SCOPE, ArgSplitter, NoGoalHelp, OptionsHelp,
UnknownGoalHelp)
from pants.option.global_options import GlobalOptionsRegistrar
from pants.option.option_value_container import OptionValueContainer
from pants.option.parser_hierarchy import ParserHierarchy
from pants.option.scope import ScopeInfo
class Options(object):
"""The outward-facing API for interacting with options.
Supports option registration and fetching option values.
Examples:
The value in global scope of option '--foo-bar' (registered in global scope) will be selected
in the following order:
- The value of the --foo-bar flag in global scope.
- The value of the PANTS_DEFAULT_FOO_BAR environment variable.
- The value of the PANTS_FOO_BAR environment variable.
- The value of the foo_bar key in the [DEFAULT] section of pants.ini.
- The hard-coded value provided at registration time.
- None.
The value in scope 'compile.java' of option '--foo-bar' (registered in global scope) will be
selected in the following order:
- The value of the --foo-bar flag in scope 'compile.java'.
- The value of the --foo-bar flag in scope 'compile'.
- The value of the --foo-bar flag in global scope.
- The value of the PANTS_COMPILE_JAVA_FOO_BAR environment variable.
- The value of the PANTS_COMPILE_FOO_BAR environment variable.
- The value of the PANTS_DEFAULT_FOO_BAR environment variable.
- The value of the PANTS_FOO_BAR environment variable.
- The value of the foo_bar key in the [compile.java] section of pants.ini.
- The value of the foo_bar key in the [compile] section of pants.ini.
- The value of the foo_bar key in the [DEFAULT] section of pants.ini.
- The hard-coded value provided at registration time.
- None.
The value in scope 'compile.java' of option '--foo-bar' (registered in scope 'compile') will be
selected in the following order:
- The value of the --foo-bar flag in scope 'compile.java'.
- The value of the --foo-bar flag in scope 'compile'.
- The value of the PANTS_COMPILE_JAVA_FOO_BAR environment variable.
- The value of the PANTS_COMPILE_FOO_BAR environment variable.
- The value of the foo_bar key in the [compile.java] section of pants.ini.
- The value of the foo_bar key in the [compile] section of pants.ini.
- The value of the foo_bar key in the [DEFAULT] section of pants.ini
(because of automatic config file fallback to that section).
- The hard-coded value provided at registration time.
- None.
"""
@classmethod
def complete_scopes(cls, scope_infos):
"""Expand a set of scopes to include all enclosing scopes.
E.g., if the set contains `foo.bar.baz`, ensure that it also contains `foo.bar` and `foo`.
"""
ret = {GlobalOptionsRegistrar.get_scope_info()}
for scope_info in scope_infos:
ret.add(scope_info)
original_scopes = {si.scope for si in scope_infos}
for scope_info in scope_infos:
scope = scope_info.scope
while scope != '':
if scope not in original_scopes:
ret.add(ScopeInfo(scope, ScopeInfo.INTERMEDIATE))
scope = scope.rpartition('.')[0]
return ret
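  # Illustrative example: given only ScopeInfo('compile.java', ScopeInfo.TASK),
  # the returned set also contains a synthesized INTERMEDIATE ScopeInfo for
  # 'compile' plus the global scope info, so parser lookups never hit a gap.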
@classmethod
def create(cls, env, config, known_scope_infos, args=None, bootstrap_option_values=None):
"""Create an Options instance.
:param env: a dict of environment variables.
:param config: data from a config file (must support config.get[list](section, name, default=)).
:param known_scope_infos: ScopeInfos for all scopes that may be encountered.
:param args: a list of cmd-line args; defaults to `sys.argv` if None is supplied.
:param bootstrap_option_values: An optional namespace containing the values of bootstrap
options. We can use these values when registering other options.
"""
# We need parsers for all the intermediate scopes, so inherited option values
# can propagate through them.
complete_known_scope_infos = cls.complete_scopes(known_scope_infos)
splitter = ArgSplitter(complete_known_scope_infos)
args = sys.argv if args is None else args
goals, scope_to_flags, target_specs, passthru, passthru_owner = splitter.split_args(args)
if bootstrap_option_values:
target_spec_files = bootstrap_option_values.target_spec_files
if target_spec_files:
for spec in target_spec_files:
with open(spec) as f:
target_specs.extend(filter(None, [line.strip() for line in f]))
help_request = splitter.help_request
parser_hierarchy = ParserHierarchy(env, config, complete_known_scope_infos)
values_by_scope = {} # Arg values, parsed per-scope on demand.
known_scope_to_info = {s.scope: s for s in complete_known_scope_infos}
return cls(goals, scope_to_flags, target_specs, passthru, passthru_owner, help_request,
parser_hierarchy, values_by_scope, bootstrap_option_values, known_scope_to_info)
def __init__(self, goals, scope_to_flags, target_specs, passthru, passthru_owner, help_request,
parser_hierarchy, values_by_scope, bootstrap_option_values, known_scope_to_info):
"""The low-level constructor for an Options instance.
Dependees should use `Options.create` instead.
"""
self._goals = goals
self._scope_to_flags = scope_to_flags
self._target_specs = target_specs
self._passthru = passthru
self._passthru_owner = passthru_owner
self._help_request = help_request
self._parser_hierarchy = parser_hierarchy
self._values_by_scope = values_by_scope
self._bootstrap_option_values = bootstrap_option_values
self._known_scope_to_info = known_scope_to_info
def drop_flag_values(self):
"""Returns a copy of these options that ignores values specified via flags.
Any pre-cached option values are cleared and only option values that come from option defaults,
the config or the environment are used.
"""
# An empty scope_to_flags to force all values to come via the config -> env hierarchy alone
# and empty values in case we already cached some from flags.
no_flags = {}
no_values = {}
return Options(self._goals,
no_flags,
self._target_specs,
self._passthru,
self._passthru_owner,
self._help_request,
self._parser_hierarchy,
no_values,
self._bootstrap_option_values,
self._known_scope_to_info)
@property
def target_specs(self):
"""The targets to operate on."""
return self._target_specs
@property
def goals(self):
"""The requested goals, in the order specified on the cmd line."""
return self._goals
def is_known_scope(self, scope):
"""Whether the given scope is known by this instance."""
return scope in self._known_scope_to_info
def passthru_args_for_scope(self, scope):
# Passthru args "belong" to the last scope mentioned on the command-line.
# Note: If that last scope is a goal, we allow all tasks in that goal to access the passthru
# args. This is to allow the more intuitive
# pants run <target> -- <passthru args>
# instead of requiring
# pants run.py <target> -- <passthru args>.
#
# However note that in the case where multiple tasks run in the same goal, e.g.,
# pants test <target> -- <passthru args>
# Then, e.g., both junit and pytest will get the passthru args even though the user probably
# only intended them to go to one of them. If the wrong one is not a no-op then the error will
# be unpredictable. However this is not a common case, and can be circumvented with an
# explicit test.pytest or test.junit scope.
if (scope and self._passthru_owner and scope.startswith(self._passthru_owner) and
(len(scope) == len(self._passthru_owner) or scope[len(self._passthru_owner)] == '.')):
return self._passthru
else:
return []
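  # Illustrative example: with passthru owner 'test' and passthru ['-k', 'foo'],
  # scopes 'test' and 'test.pytest' both receive the args, while 'testprojects'
  # does not; it merely shares the 'test' prefix and is rejected by the
  # trailing-'.' check above.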
def register(self, scope, *args, **kwargs):
"""Register an option in the given scope, using argparse params."""
self.get_parser(scope).register(*args, **kwargs)
def registration_function_for_optionable(self, optionable_class):
"""Returns a function for registering argparse args on the given scope."""
# TODO(benjy): Make this an instance of a class that implements __call__, so we can
    # docstring it, and so it's less weird than attaching properties to a function.
def register(*args, **kwargs):
kwargs['registering_class'] = optionable_class
self.register(optionable_class.options_scope, *args, **kwargs)
# Clients can access the bootstrap option values as register.bootstrap.
register.bootstrap = self.bootstrap_option_values()
# Clients can access the scope as register.scope.
register.scope = optionable_class.options_scope
return register
def get_parser(self, scope):
"""Returns the parser for the given scope, so code can register on it directly."""
return self._parser_hierarchy.get_parser_by_scope(scope)
def walk_parsers(self, callback):
self._parser_hierarchy.walk(callback)
def for_scope(self, scope):
"""Return the option values for the given scope.
Values are attributes of the returned object, e.g., options.foo.
Computed lazily per scope.
"""
# Short-circuit, if already computed.
if scope in self._values_by_scope:
return self._values_by_scope[scope]
# First get enclosing scope's option values, if any.
if scope == GLOBAL_SCOPE:
values = OptionValueContainer()
else:
values = copy.deepcopy(self.for_scope(scope.rpartition('.')[0]))
# Now add our values.
flags_in_scope = self._scope_to_flags.get(scope, [])
self._parser_hierarchy.get_parser_by_scope(scope).parse_args(flags_in_scope, values)
self._values_by_scope[scope] = values
return values
def registration_args_iter_for_scope(self, scope):
"""Returns an iterator over the registration arguments of each option in this scope.
See `Parser.registration_args_iter` for details.
"""
return self._parser_hierarchy.get_parser_by_scope(scope).registration_args_iter()
def get_fingerprintable_for_scope(self, scope):
"""Returns a list of fingerprintable (option type, option value) pairs for the given scope.
Fingerprintable options are options registered via a "fingerprint=True" kwarg.
"""
pairs = []
# Note that we iterate over options registered at `scope` and at all enclosing scopes, since
# option-using code can read those values indirectly via its own OptionValueContainer, so
# they can affect that code's output.
registration_scope = scope
while registration_scope is not None:
# This iterator will have already sorted the options, so their order is deterministic.
for (name, _, kwargs) in self.registration_args_iter_for_scope(registration_scope):
if kwargs.get('recursive') and not kwargs.get('recursive_root'):
          continue  # We only need to fingerprint recursive options once.
if kwargs.get('fingerprint') is not True:
continue
# Note that we read the value from scope, even if the registration was on an enclosing
# scope, to get the right value for recursive options (and because this mirrors what
# option-using code does).
val = self.for_scope(scope)[name]
val_type = kwargs.get('type', '')
pairs.append((val_type, val))
registration_scope = (None if registration_scope == ''
else registration_scope.rpartition('.')[0])
return pairs
def __getitem__(self, scope):
# TODO(John Sirois): Mainly supports use of dict<str, dict<str, str>> for mock options in tests,
# Consider killing if tests consolidate on using TestOptions instead of the raw dicts.
return self.for_scope(scope)
def bootstrap_option_values(self):
"""Return the option values for bootstrap options.
General code can also access these values in the global scope. But option registration code
cannot, hence this special-casing of this small set of options.
"""
return self._bootstrap_option_values
def for_global_scope(self):
"""Return the option values for the global scope."""
return self.for_scope(GLOBAL_SCOPE)
def print_help_if_requested(self):
"""If help was requested, print it and return True.
Otherwise return False.
"""
if self._help_request:
def print_hint():
print('Use `pants goals` to list goals.')
print('Use `pants help` to get help.')
if isinstance(self._help_request, OptionsHelp):
self._print_options_help()
elif isinstance(self._help_request, UnknownGoalHelp):
print('Unknown goals: {}'.format(', '.join(self._help_request.unknown_goals)))
print_hint()
# TODO: Should probably cause a non-zero exit code.
elif isinstance(self._help_request, NoGoalHelp):
print('No goals specified.')
print_hint()
# TODO: Should probably cause a non-zero exit code.
return True
else:
return False
def _print_options_help(self):
"""Print a help screen.
Assumes that self._help_request is an instance of OptionsHelp.
    Note: Only useful if called after options have been registered.
"""
show_all_help = self._help_request.all_scopes
if show_all_help:
help_scopes = self._known_scope_to_info.keys()
else:
# The scopes explicitly mentioned by the user on the cmd line.
help_scopes = set(self._scope_to_flags.keys()) - set([GLOBAL_SCOPE])
# Add all subscopes (e.g., so that `pants help compile` shows help for all tasks under
# `compile`.) Note that sorting guarantees that we only need to check the immediate parent.
for scope in sorted(self._known_scope_to_info.keys()):
if scope.partition('.')[0] in help_scopes:
help_scopes.add(scope)
help_scope_infos = [self._known_scope_to_info[s] for s in sorted(help_scopes)]
if help_scope_infos:
for scope_info in help_scope_infos:
help_str = self._format_options_help_for_scope(scope_info)
if help_str:
print(help_str)
return
else:
print(pants_release())
print('\nUsage:')
print(' ./pants [option ...] [goal ...] [target...] Attempt the specified goals.')
print(' ./pants help Get help.')
print(' ./pants help [goal] Get help for a goal.')
print(' ./pants help-advanced [goal] Get help for a goal\'s advanced options.')
print(' ./pants help-all Get help for all goals.')
print(' ./pants goals List all installed goals.')
print('')
print(' [target] accepts two special forms:')
print(' dir: to include all targets in the specified directory.')
print(' dir:: to include all targets found recursively under the directory.')
print('\nFriendly docs:\n http://pantsbuild.github.io/')
print(self.get_parser(GLOBAL_SCOPE).format_help('Global', 'Global options',
show_advanced=self._help_request.advanced))
def _format_options_help_for_scope(self, scope_info):
"""Generate a help message for options at the specified scope.
Assumes that self._help_request is an instance of OptionsHelp.
    :param scope_info: A ScopeInfo for the specified scope.
"""
description = scope_info.optionable_cls.get_description() if scope_info.optionable_cls else None
return self.get_parser(scope_info.scope).format_help(scope_info.scope, description,
self._help_request.advanced)
| areitz/pants | src/python/pants/option/options.py | Python | apache-2.0 | 16,359 |
# -*- coding: utf-8 -*-
'''
Manage RabbitMQ Virtual Hosts
=============================
Example:
.. code-block:: yaml
virtual_host:
rabbitmq_vhost.present:
- user: rabbit_user
- conf: .*
- write: .*
- read: .*
'''
# Import python libs
from __future__ import absolute_import
import logging
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if RabbitMQ is installed.
'''
return salt.utils.which('rabbitmqctl') is not None
def present(name):
'''
Ensure the RabbitMQ VHost exists.
name
VHost name
user
Initial user permission to set on the VHost, if present
.. deprecated:: 2015.8.0
owner
Initial owner permission to set on the VHost, if present
.. deprecated:: 2015.8.0
conf
Initial conf string to apply to the VHost and user. Defaults to .*
.. deprecated:: 2015.8.0
write
Initial write permissions to apply to the VHost and user.
Defaults to .*
.. deprecated:: 2015.8.0
read
Initial read permissions to apply to the VHost and user.
Defaults to .*
.. deprecated:: 2015.8.0
runas
Name of the user to run the command
.. deprecated:: 2015.8.0
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
vhost_exists = __salt__['rabbitmq.vhost_exists'](name)
if vhost_exists:
ret['comment'] = 'Virtual Host \'{0}\' already exists.'.format(name)
return ret
if not __opts__['test']:
result = __salt__['rabbitmq.add_vhost'](name)
if 'Error' in result:
ret['result'] = False
ret['comment'] = result['Error']
return ret
elif 'Added' in result:
ret['comment'] = result['Added']
# If we've reached this far before returning, we have changes.
ret['changes'] = {'old': '', 'new': name}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Virtual Host \'{0}\' will be created.'.format(name)
return ret
def absent(name):
'''
Ensure the RabbitMQ Virtual Host is absent
name
Name of the Virtual Host to remove
runas
User to run the command
.. deprecated:: 2015.8.0
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
vhost_exists = __salt__['rabbitmq.vhost_exists'](name)
if not vhost_exists:
ret['comment'] = 'Virtual Host \'{0}\' is not present.'.format(name)
return ret
if not __opts__['test']:
result = __salt__['rabbitmq.delete_vhost'](name)
if 'Error' in result:
ret['result'] = False
ret['comment'] = result['Error']
return ret
elif 'Deleted' in result:
ret['comment'] = result['Deleted']
# If we've reached this far before returning, we have changes.
ret['changes'] = {'new': '', 'old': name}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Virtual Host \'{0}\' will be removed.'.format(name)
return ret
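# Example (illustrative) of removing a virtual host with the ``absent`` state:
#
# .. code-block:: yaml
#
#     old_virtual_host:
#       rabbitmq_vhost.absent:
#         - name: virtual_host_to_remove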
| stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/states/rabbitmq_vhost.py | Python | apache-2.0 | 3,164 |
# encoding: utf-8
from gerencianet import Gerencianet
from credentials import CREDENTIALS
gn = Gerencianet(CREDENTIALS)
params = {
'id': 1
}
body = {
'notification_url': 'http://yourdomain.com',
'custom_id': 'my_new_id'
}
response = gn.update_subscription_metadata(params=params, body=body)
print(response)
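# The CREDENTIALS dict imported above is assumed (illustrative; not shown in
# this file) to follow the SDK's usual shape:
#
#     CREDENTIALS = {
#         'client_id': 'your_client_id',
#         'client_secret': 'your_client_secret',
#         'sandbox': True
#     }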
| gerencianet/gn-api-sdk-python | examples/update_subscription_metadata.py | Python | mit | 325 |
#!/usr/bin/env python
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script for running the unit test and example tests for the python
binding."""
import optparse
import os
import subprocess
import sys
def run_script(script_name, *args):
command = [sys.executable, script_name.replace("/", os.path.sep)]
command.extend(args)
return subprocess.Popen(command)
if __name__ == "__main__":
usage = 'usage: %prog [options] arg'
parser = optparse.OptionParser(usage)
parser.add_option('-d', '--driver', dest='driver', action='store',
default='firefox', type='choice',
choices=['chrome', 'firefox', 'remote'],
help='Which driver to test.')
(options, args) = parser.parse_args()
driver_tests_dict = {
'chrome': ['api_examples'],
'firefox': ['api_examples', 'cookie_tests', 'firefox_launcher_tests'],
'remote': ['api_examples'],
}
base_dir = os.path.abspath(os.path.dirname(__file__))
  print 'base_dir:', base_dir
os.environ["WEBDRIVER"] = base_dir
os.environ["PYTHONPATH"] = os.pathsep.join([os.environ.get("PYTHONPATH", ""),
os.path.join(base_dir, "../../../", "firefox", "lib-src"),
os.path.join(base_dir, '..')])
try:
for test in driver_tests_dict[options.driver]:
process = run_script(os.path.join(base_dir, "%s_tests/%s.py" % (options.driver, test)))
assert process.wait() == 0, "Test %s failed" % test
finally:
try:
os.kill(process.pid, 9)
except:
pass
| PolicyStat/selenium-old | py_test.py | Python | apache-2.0 | 2,243 |
"""Python wrappers around Brain.
This file is MACHINE GENERATED! Do not edit.
"""
from google.protobuf import text_format
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.ops import op_def_library
def apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad,
use_locking=None, name=None):
r"""Update '*var' according to the adadelta scheme.
accum = rho() * accum + (1 - rho()) * grad.square();
update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
update_accum = rho() * update_accum + (1 - rho()) * update.square();
var -= update;
Args:
var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a Variable().
accum: A mutable `Tensor`. Must have the same type as `var`.
Should be from a Variable().
accum_update: A mutable `Tensor`. Must have the same type as `var`.
Should be from a Variable().
lr: A `Tensor`. Must have the same type as `var`.
Scaling factor. Must be a scalar.
rho: A `Tensor`. Must have the same type as `var`.
Decay factor. Must be a scalar.
epsilon: A `Tensor`. Must have the same type as `var`.
Constant factor. Must be a scalar.
grad: A `Tensor`. Must have the same type as `var`. The gradient.
use_locking: An optional `bool`. Defaults to `False`.
If True, updating of the var, accum and update_accum tensors will be protected by
a lock; otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `var`. Same as "var".
"""
return _op_def_lib.apply_op("ApplyAdadelta", var=var, accum=accum,
accum_update=accum_update, lr=lr, rho=rho,
epsilon=epsilon, grad=grad,
use_locking=use_locking, name=name)
def apply_adagrad(var, accum, lr, grad, use_locking=None, name=None):
r"""Update '*var' according to the adagrad scheme.
accum += grad * grad
var -= lr * grad * (1 / sqrt(accum))
Args:
var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a Variable().
accum: A mutable `Tensor`. Must have the same type as `var`.
Should be from a Variable().
lr: A `Tensor`. Must have the same type as `var`.
Scaling factor. Must be a scalar.
grad: A `Tensor`. Must have the same type as `var`. The gradient.
use_locking: An optional `bool`. Defaults to `False`.
If `True`, updating of the var and accum tensors will be protected
by a lock; otherwise the behavior is undefined, but may exhibit less
contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `var`. Same as "var".
"""
return _op_def_lib.apply_op("ApplyAdagrad", var=var, accum=accum, lr=lr,
grad=grad, use_locking=use_locking, name=name)
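# Hedged reference sketch (not part of the generated op defs): the adagrad
# update documented above can be sanity-checked against plain NumPy. The
# helper name `_adagrad_reference` is an illustrative assumption, not a
# TensorFlow API.
def _adagrad_reference(var, accum, lr, grad):
  """Return (var, accum) after accum += grad**2; var -= lr*grad/sqrt(accum)."""
  import numpy as np
  accum = accum + grad * grad
  var = var - lr * grad / np.sqrt(accum)
  return var, accum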
def apply_adam(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon,
grad, use_locking=None, name=None):
r"""Update '*var' according to the Adam algorithm.
lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
Args:
var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a Variable().
m: A mutable `Tensor`. Must have the same type as `var`.
Should be from a Variable().
v: A mutable `Tensor`. Must have the same type as `var`.
Should be from a Variable().
beta1_power: A `Tensor`. Must have the same type as `var`.
Must be a scalar.
beta2_power: A `Tensor`. Must have the same type as `var`.
Must be a scalar.
lr: A `Tensor`. Must have the same type as `var`.
Scaling factor. Must be a scalar.
beta1: A `Tensor`. Must have the same type as `var`.
Momentum factor. Must be a scalar.
beta2: A `Tensor`. Must have the same type as `var`.
Momentum factor. Must be a scalar.
epsilon: A `Tensor`. Must have the same type as `var`.
Ridge term. Must be a scalar.
grad: A `Tensor`. Must have the same type as `var`. The gradient.
use_locking: An optional `bool`. Defaults to `False`.
If `True`, updating of the var, m, and v tensors will be protected
by a lock; otherwise the behavior is undefined, but may exhibit less
contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `var`. Same as "var".
"""
return _op_def_lib.apply_op("ApplyAdam", var=var, m=m, v=v,
beta1_power=beta1_power,
beta2_power=beta2_power, lr=lr, beta1=beta1,
beta2=beta2, epsilon=epsilon, grad=grad,
use_locking=use_locking, name=name)
def apply_ftrl(var, accum, linear, grad, lr, l1, l2, lr_power,
use_locking=None, name=None):
r"""Update '*var' according to the Ftrl-proximal scheme.
accum_new = accum + grad * grad
linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
accum = accum_new
Args:
var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a Variable().
accum: A mutable `Tensor`. Must have the same type as `var`.
Should be from a Variable().
linear: A mutable `Tensor`. Must have the same type as `var`.
Should be from a Variable().
grad: A `Tensor`. Must have the same type as `var`. The gradient.
lr: A `Tensor`. Must have the same type as `var`.
Scaling factor. Must be a scalar.
l1: A `Tensor`. Must have the same type as `var`.
Scaling factor. Must be a scalar.
l2: A `Tensor`. Must have the same type as `var`.
Scaling factor. Must be a scalar.
lr_power: A `Tensor`. Must have the same type as `var`.
Scaling factor. Must be a scalar.
use_locking: An optional `bool`. Defaults to `False`.
If `True`, updating of the var and accum tensors will be protected
by a lock; otherwise the behavior is undefined, but may exhibit less
contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `var`. Same as "var".
"""
return _op_def_lib.apply_op("ApplyFtrl", var=var, accum=accum,
linear=linear, grad=grad, lr=lr, l1=l1, l2=l2,
lr_power=lr_power, use_locking=use_locking,
name=name)
def apply_gradient_descent(var, alpha, delta, use_locking=None, name=None):
r"""Update '*var' by subtracting 'alpha' * 'delta' from it.
Args:
var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a Variable().
alpha: A `Tensor`. Must have the same type as `var`.
Scaling factor. Must be a scalar.
delta: A `Tensor`. Must have the same type as `var`. The change.
use_locking: An optional `bool`. Defaults to `False`.
If `True`, the subtraction will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `var`. Same as "var".
"""
return _op_def_lib.apply_op("ApplyGradientDescent", var=var, alpha=alpha,
delta=delta, use_locking=use_locking, name=name)
def apply_momentum(var, accum, lr, grad, momentum, use_locking=None,
name=None):
r"""Update '*var' according to the momentum scheme.
accum = accum * momentum + grad
var -= lr * accum
Args:
var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a Variable().
accum: A mutable `Tensor`. Must have the same type as `var`.
Should be from a Variable().
lr: A `Tensor`. Must have the same type as `var`.
Scaling factor. Must be a scalar.
grad: A `Tensor`. Must have the same type as `var`. The gradient.
momentum: A `Tensor`. Must have the same type as `var`.
Momentum. Must be a scalar.
use_locking: An optional `bool`. Defaults to `False`.
If `True`, updating of the var and accum tensors will be protected
by a lock; otherwise the behavior is undefined, but may exhibit less
contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `var`. Same as "var".
"""
return _op_def_lib.apply_op("ApplyMomentum", var=var, accum=accum, lr=lr,
grad=grad, momentum=momentum,
use_locking=use_locking, name=name)
def apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad,
use_locking=None, name=None):
r"""Update '*var' according to the RMSProp algorithm.
mean_square = decay * mean_square + (1-decay) * gradient ** 2
Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
ms <- rho * ms_{t-1} + (1-rho) * grad * grad
mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
var <- var - mom
Args:
var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a Variable().
ms: A mutable `Tensor`. Must have the same type as `var`.
Should be from a Variable().
mom: A mutable `Tensor`. Must have the same type as `var`.
Should be from a Variable().
lr: A `Tensor`. Must have the same type as `var`.
Scaling factor. Must be a scalar.
rho: A `Tensor`. Must have the same type as `var`.
Decay rate. Must be a scalar.
momentum: A `Tensor`. Must have the same type as `var`.
epsilon: A `Tensor`. Must have the same type as `var`.
Ridge term. Must be a scalar.
grad: A `Tensor`. Must have the same type as `var`. The gradient.
use_locking: An optional `bool`. Defaults to `False`.
If `True`, updating of the var, m, and v tensors will be protected
by a lock; otherwise the behavior is undefined, but may exhibit less
contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `var`. Same as "var".
"""
return _op_def_lib.apply_op("ApplyRMSProp", var=var, ms=ms, mom=mom, lr=lr,
rho=rho, momentum=momentum, epsilon=epsilon,
grad=grad, use_locking=use_locking, name=name)
def sparse_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad,
indices, use_locking=None, name=None):
r"""var: Should be from a Variable().
Args:
var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
accum: A mutable `Tensor`. Must have the same type as `var`.
accum_update: A mutable `Tensor`. Must have the same type as `var`.
lr: A `Tensor`. Must have the same type as `var`.
rho: A `Tensor`. Must have the same type as `var`.
epsilon: A `Tensor`. Must have the same type as `var`.
grad: A `Tensor`. Must have the same type as `var`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
use_locking: An optional `bool`. Defaults to `False`.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `var`.
"""
return _op_def_lib.apply_op("SparseApplyAdadelta", var=var, accum=accum,
accum_update=accum_update, lr=lr, rho=rho,
epsilon=epsilon, grad=grad, indices=indices,
use_locking=use_locking, name=name)
def sparse_apply_adagrad(var, accum, lr, grad, indices, use_locking=None,
name=None):
r"""Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
That is for rows we have grad for, we update var and accum as follows:
accum += grad * grad
var -= lr * grad * (1 / sqrt(accum))
Args:
var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a Variable().
accum: A mutable `Tensor`. Must have the same type as `var`.
Should be from a Variable().
lr: A `Tensor`. Must have the same type as `var`.
Learning rate. Must be a scalar.
grad: A `Tensor`. Must have the same type as `var`. The gradient.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A vector of indices into the first dimension of var and accum.
use_locking: An optional `bool`. Defaults to `False`.
If `True`, updating of the var and accum tensors will be protected
by a lock; otherwise the behavior is undefined, but may exhibit less
contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `var`. Same as "var".
"""
return _op_def_lib.apply_op("SparseApplyAdagrad", var=var, accum=accum,
lr=lr, grad=grad, indices=indices,
use_locking=use_locking, name=name)
def sparse_apply_ftrl(var, accum, linear, grad, indices, lr, l1, l2, lr_power,
use_locking=None, name=None):
r"""Update relevant entries in '*var' according to the Ftrl-proximal scheme.
That is for rows we have grad for, we update var, accum and linear as follows:
accum_new = accum + grad * grad
linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
accum = accum_new
Args:
var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a Variable().
accum: A mutable `Tensor`. Must have the same type as `var`.
Should be from a Variable().
linear: A mutable `Tensor`. Must have the same type as `var`.
Should be from a Variable().
grad: A `Tensor`. Must have the same type as `var`. The gradient.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A vector of indices into the first dimension of var and accum.
lr: A `Tensor`. Must have the same type as `var`.
Scaling factor. Must be a scalar.
l1: A `Tensor`. Must have the same type as `var`.
Scaling factor. Must be a scalar.
l2: A `Tensor`. Must have the same type as `var`.
Scaling factor. Must be a scalar.
lr_power: A `Tensor`. Must have the same type as `var`.
Scaling factor. Must be a scalar.
use_locking: An optional `bool`. Defaults to `False`.
If `True`, updating of the var and accum tensors will be protected
by a lock; otherwise the behavior is undefined, but may exhibit less
contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `var`. Same as "var".
"""
return _op_def_lib.apply_op("SparseApplyFtrl", var=var, accum=accum,
linear=linear, grad=grad, indices=indices,
lr=lr, l1=l1, l2=l2, lr_power=lr_power,
use_locking=use_locking, name=name)
def sparse_apply_momentum(var, accum, lr, grad, indices, momentum,
use_locking=None, name=None):
r"""Update relevant entries in '*var' and '*accum' according to the momentum scheme.
That is for rows we have grad for, we update var and accum as follows:
accum = accum * momentum + grad
var -= lr * accum
Args:
var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a Variable().
accum: A mutable `Tensor`. Must have the same type as `var`.
Should be from a Variable().
lr: A `Tensor`. Must have the same type as `var`.
Learning rate. Must be a scalar.
grad: A `Tensor`. Must have the same type as `var`. The gradient.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A vector of indices into the first dimension of var and accum.
momentum: A `Tensor`. Must have the same type as `var`.
Momentum. Must be a scalar.
use_locking: An optional `bool`. Defaults to `False`.
If `True`, updating of the var and accum tensors will be protected
by a lock; otherwise the behavior is undefined, but may exhibit less
contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `var`. Same as "var".
"""
return _op_def_lib.apply_op("SparseApplyMomentum", var=var, accum=accum,
lr=lr, grad=grad, indices=indices,
momentum=momentum, use_locking=use_locking,
name=name)
def _InitOpDefLibrary():
op_list = op_def_pb2.OpList()
text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
op_def_registry.register_op_list(op_list)
op_def_lib = op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
_InitOpDefLibrary.op_list_ascii = """op {
name: "ApplyAdadelta"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum_update"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyAdagrad"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyAdam"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "m"
type_attr: "T"
is_ref: true
}
input_arg {
name: "v"
type_attr: "T"
is_ref: true
}
input_arg {
name: "beta1_power"
type_attr: "T"
}
input_arg {
name: "beta2_power"
type_attr: "T"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "beta1"
type_attr: "T"
}
input_arg {
name: "beta2"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyFtrl"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "linear"
type_attr: "T"
is_ref: true
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "lr_power"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyGradientDescent"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "alpha"
type_attr: "T"
}
input_arg {
name: "delta"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyMomentum"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyRMSProp"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "ms"
type_attr: "T"
is_ref: true
}
input_arg {
name: "mom"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "SparseApplyAdadelta"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum_update"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "SparseApplyAdagrad"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "SparseApplyFtrl"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "linear"
type_attr: "T"
is_ref: true
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "lr_power"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "SparseApplyMomentum"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "momentum"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
"""
_op_def_lib = _InitOpDefLibrary()
| shishaochen/TensorFlow-0.8-Win | tensorflow/python/training/gen_training_ops.py | Python | apache-2.0 | 30,814 |
#-*- coding: utf-8 -*-
import argparse
import os
import numpy as np
import paddle.fluid as fluid
from train import get_player
from tqdm import tqdm
def predict_action(exe, state, predict_program, feed_names, fetch_targets,
action_dim):
if np.random.random() < 0.01:
act = np.random.randint(action_dim)
else:
state = np.expand_dims(state, axis=0)
pred_Q = exe.run(predict_program,
feed={feed_names[0]: state.astype('float32')},
fetch_list=fetch_targets)[0]
pred_Q = np.squeeze(pred_Q, axis=0)
act = np.argmax(pred_Q)
return act
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--use_cuda', action='store_true', help='if set, use cuda')
parser.add_argument('--rom', type=str, required=True, help='atari rom')
parser.add_argument(
'--model_path', type=str, required=True, help='dirname to load model')
parser.add_argument(
'--viz',
type=float,
default=0,
help='''viz: visualization setting:
Set to 0 to disable;
Set to a positive number to be the delay between frames to show.
''')
args = parser.parse_args()
env = get_player(args.rom, viz=args.viz)
place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope):
[predict_program, feed_names,
fetch_targets] = fluid.io.load_inference_model(args.model_path, exe)
episode_reward = []
for _ in tqdm(xrange(30), desc='eval agent'):
state = env.reset()
total_reward = 0
while True:
action = predict_action(exe, state, predict_program, feed_names,
fetch_targets, env.action_space.n)
state, reward, isOver, info = env.step(action)
total_reward += reward
if isOver:
break
episode_reward.append(total_reward)
eval_reward = np.mean(episode_reward)
        print('Average reward over 30 episodes: {}'.format(eval_reward))
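        # Example invocation (illustrative; rom and model paths are
        # placeholders):
        #   python play.py --rom breakout.bin --model_path ./saved_model --viz 0.01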
| lcy-seso/models | fluid/DeepQNetwork/play.py | Python | apache-2.0 | 2,279 |