code | repo_name | path | language | license | size |
---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from ... import *
from talon.signature.learning import featurespace as fs
def test_apply_features():
s = '''This is John Doe
Tuesday @3pm suits. I'll chat to you then.
VP Research and Development, Xxxx Xxxx Xxxxx
555-226-2345
[email protected]'''
sender = 'John <[email protected]>'
features = fs.features(sender)
result = fs.apply_features(s, features)
# note that the first line is not considered because signatures don't
# usually span the whole text; empty lines are not considered either
eq_(result, [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
with patch.object(fs, 'SIGNATURE_MAX_LINES', 5):
features = fs.features(sender)
new_result = fs.apply_features(s, features)
# result remains the same because we don't consider empty lines
eq_(result, new_result)
def test_build_pattern():
s = '''John Doe
VP Research and Development, Xxxx Xxxx Xxxxx
555-226-2345
[email protected]'''
sender = 'John <[email protected]>'
features = fs.features(sender)
result = fs.build_pattern(s, features)
eq_(result, [2, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1])
| mailgun/talon | tests/signature/learning/featurespace_test.py | Python | apache-2.0 | 1,402 |
#!/usr/bin/env python3
"""Generate an updated requirements_all.txt."""
import importlib
import os
import pathlib
import pkgutil
import re
import sys
from script.hassfest.model import Integration
COMMENT_REQUIREMENTS = (
"Adafruit-DHT",
"Adafruit_BBIO",
"avion",
"beacontools",
"blinkt",
"bluepy",
"bme680",
"credstash",
"decora",
"envirophat",
"evdev",
"face_recognition",
"fritzconnection",
"i2csense",
"opencv-python-headless",
"py_noaa",
"VL53L1X2",
"pybluez",
"pycups",
"PySwitchbot",
"pySwitchmate",
"python-eq3bt",
"python-lirc",
"pyuserinput",
"raspihats",
"rpi-rf",
"RPi.GPIO",
"smbus-cffi",
)
TEST_REQUIREMENTS = (
"adguardhome",
"ambiclimate",
"aioambient",
"aioautomatic",
"aiobotocore",
"aioesphomeapi",
"aiohttp_cors",
"aiohue",
"aionotion",
"aiounifi",
"aioswitcher",
"aiowwlln",
"apns2",
"aprslib",
"av",
"axis",
"caldav",
"coinmarketcap",
"defusedxml",
"dsmr_parser",
"eebrightbox",
"emulated_roku",
"enocean",
"ephem",
"evohomeclient",
"feedparser-homeassistant",
"foobot_async",
"geojson_client",
"geopy",
"georss_generic_client",
"georss_ign_sismologia_client",
"georss_qld_bushfire_alert_client",
"getmac",
"google-api-python-client",
"gTTS-token",
"ha-ffmpeg",
"hangups",
"HAP-python",
"hass-nabucasa",
"haversine",
"hbmqtt",
"hdate",
"holidays",
"home-assistant-frontend",
"homekit[IP]",
"homematicip",
"httplib2",
"huawei-lte-api",
"influxdb",
"jsonpath",
"libpurecool",
"libsoundtouch",
"luftdaten",
"pyMetno",
"mbddns",
"mficlient",
"netdisco",
"numpy",
"oauth2client",
"paho-mqtt",
"pexpect",
"pilight",
"pmsensor",
"prometheus_client",
"ptvsd",
"pushbullet.py",
"py-canary",
"pyblackbird",
"pydeconz",
"pydispatcher",
"pyheos",
"pyhomematic",
"pyiqvia",
"pylitejet",
"pymfy",
"pymonoprice",
"pynx584",
"pyopenuv",
"pyotp",
"pyps4-homeassistant",
"pysmartapp",
"pysmartthings",
"pysonos",
"pyqwikswitch",
"PyRMVtransport",
"PyTransportNSW",
"pyspcwebgw",
"python-forecastio",
"python-nest",
"python_awair",
"python-velbus",
"pytradfri[async]",
"pyunifi",
"pyupnp-async",
"pyvesync",
"pywebpush",
"pyHS100",
"PyNaCl",
"regenmaschine",
"restrictedpython",
"rflink",
"ring_doorbell",
"rxv",
"simplisafe-python",
"sleepyq",
"smhi-pkg",
"somecomfort",
"sqlalchemy",
"srpenergy",
"statsd",
"toonapilib",
"twentemilieu",
"uvcclient",
"vsure",
"warrant",
"pythonwhois",
"wakeonlan",
"vultr",
"YesssSMS",
"ruamel.yaml",
"zeroconf",
"zigpy-homeassistant",
"bellows-homeassistant",
"py17track",
)
IGNORE_PIN = ("colorlog>2.1,<3", "keyring>=9.3,<10.0", "urllib3")
IGNORE_REQ = ("colorama<=1",) # Windows only requirement in check_config
URL_PIN = (
"https://developers.home-assistant.io/docs/"
"creating_platform_code_review.html#1-requirements"
)
CONSTRAINT_PATH = os.path.join(
os.path.dirname(__file__), "../homeassistant/package_constraints.txt"
)
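# The 1000000000.0.0 pins below point at a version that can never exist,
# so pip will refuse to install those packages under these constraints.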
CONSTRAINT_BASE = """
pycryptodome>=3.6.6
# Breaks Python 3.6 and is not needed for our supported Python versions
enum34==1000000000.0.0
# This is an old unmaintained library and is replaced with pycryptodome
pycrypto==1000000000.0.0
# Contains code to modify Home Assistant to work around our rules
python-systemair-savecair==1000000000.0.0
"""
def explore_module(package, explore_children):
"""Explore the modules."""
module = importlib.import_module(package)
found = []
if not hasattr(module, "__path__"):
return found
for _, name, _ in pkgutil.iter_modules(module.__path__, package + "."):
found.append(name)
if explore_children:
found.extend(explore_module(name, False))
return found
def core_requirements():
"""Gather core requirements out of setup.py."""
with open("setup.py") as inp:
reqs_raw = re.search(r"REQUIRES = \[(.*?)\]", inp.read(), re.S).group(1)
return [x[1] for x in re.findall(r"(['\"])(.*?)\1", reqs_raw)]
def gather_recursive_requirements(domain, seen=None):
"""Recursively gather requirements from a module."""
if seen is None:
seen = set()
seen.add(domain)
integration = Integration(
pathlib.Path("homeassistant/components/{}".format(domain))
)
integration.load_manifest()
reqs = set(integration.manifest["requirements"])
for dep_domain in integration.manifest["dependencies"]:
reqs.update(gather_recursive_requirements(dep_domain, seen))
return reqs
def comment_requirement(req):
"""Comment out requirement. Some don't install on all systems."""
return any(ign in req for ign in COMMENT_REQUIREMENTS)
def gather_modules():
"""Collect the information."""
reqs = {}
errors = []
gather_requirements_from_manifests(errors, reqs)
gather_requirements_from_modules(errors, reqs)
for key in reqs:
reqs[key] = sorted(reqs[key], key=lambda name: (len(name.split(".")), name))
if errors:
print("******* ERROR")
print("Errors while importing: ", ", ".join(errors))
return None
return reqs
def gather_requirements_from_manifests(errors, reqs):
"""Gather all of the requirements from manifests."""
integrations = Integration.load_dir(pathlib.Path("homeassistant/components"))
for domain in sorted(integrations):
integration = integrations[domain]
if not integration.manifest:
errors.append("The manifest for integration {} is invalid.".format(domain))
continue
process_requirements(
errors,
integration.manifest["requirements"],
"homeassistant.components.{}".format(domain),
reqs,
)
def gather_requirements_from_modules(errors, reqs):
"""Collect the requirements from the modules directly."""
for package in sorted(
explore_module("homeassistant.scripts", True)
+ explore_module("homeassistant.auth", True)
):
try:
module = importlib.import_module(package)
except ImportError as err:
print("{}: {}".format(package.replace(".", "/") + ".py", err))
errors.append(package)
continue
if getattr(module, "REQUIREMENTS", None):
process_requirements(errors, module.REQUIREMENTS, package, reqs)
def process_requirements(errors, module_requirements, package, reqs):
"""Process all of the requirements."""
for req in module_requirements:
if req in IGNORE_REQ:
continue
if "://" in req:
errors.append(
"{}[Only pypi dependencies are allowed: {}]".format(package, req)
)
if req.partition("==")[1] == "" and req not in IGNORE_PIN:
errors.append(
"{}[Please pin requirement {}, see {}]".format(package, req, URL_PIN)
)
reqs.setdefault(req, []).append(package)
def generate_requirements_list(reqs):
"""Generate a pip file based on requirements."""
output = []
for pkg, requirements in sorted(reqs.items(), key=lambda item: item[0]):
for req in sorted(requirements):
output.append("\n# {}".format(req))
if comment_requirement(pkg):
output.append("\n# {}\n".format(pkg))
else:
output.append("\n{}\n".format(pkg))
return "".join(output)
def requirements_all_output(reqs):
"""Generate output for requirements_all."""
output = []
output.append("# Home Assistant core")
output.append("\n")
output.append("\n".join(core_requirements()))
output.append("\n")
output.append(generate_requirements_list(reqs))
return "".join(output)
def requirements_test_output(reqs):
"""Generate output for test_requirements."""
output = []
output.append("# Home Assistant test")
output.append("\n")
with open("requirements_test.txt") as test_file:
output.append(test_file.read())
output.append("\n")
filtered = {
key: value
for key, value in reqs.items()
if any(
re.search(r"(^|#){}($|[=><])".format(re.escape(ign)), key) is not None
for ign in TEST_REQUIREMENTS
)
}
output.append(generate_requirements_list(filtered))
return "".join(output)
def gather_constraints():
"""Construct output for constraint file."""
return "\n".join(
sorted(
core_requirements() + list(gather_recursive_requirements("default_config"))
)
+ [""]
)
def write_requirements_file(data):
"""Write the modules to the requirements_all.txt."""
with open("requirements_all.txt", "w+", newline="\n") as req_file:
req_file.write(data)
def write_test_requirements_file(data):
"""Write the modules to the requirements_test_all.txt."""
with open("requirements_test_all.txt", "w+", newline="\n") as req_file:
req_file.write(data)
def write_constraints_file(data):
"""Write constraints to a file."""
with open(CONSTRAINT_PATH, "w+", newline="\n") as req_file:
req_file.write(data + CONSTRAINT_BASE)
def validate_requirements_file(data):
"""Validate if requirements_all.txt is up to date."""
with open("requirements_all.txt", "r") as req_file:
return data == req_file.read()
def validate_requirements_test_file(data):
"""Validate if requirements_test_all.txt is up to date."""
with open("requirements_test_all.txt", "r") as req_file:
return data == req_file.read()
def validate_constraints_file(data):
"""Validate if constraints is up to date."""
with open(CONSTRAINT_PATH, "r") as req_file:
return data + CONSTRAINT_BASE == req_file.read()
def main(validate):
"""Run the script."""
if not os.path.isfile("requirements_all.txt"):
print("Run this from HA root dir")
return 1
data = gather_modules()
if data is None:
return 1
constraints = gather_constraints()
reqs_file = requirements_all_output(data)
reqs_test_file = requirements_test_output(data)
if validate:
errors = []
if not validate_requirements_file(reqs_file):
errors.append("requirements_all.txt is not up to date")
if not validate_requirements_test_file(reqs_test_file):
errors.append("requirements_test_all.txt is not up to date")
if not validate_constraints_file(constraints):
errors.append("home-assistant/package_constraints.txt is not up to date")
if errors:
print("******* ERROR")
print("\n".join(errors))
print("Please run script/gen_requirements_all.py")
return 1
return 0
write_requirements_file(reqs_file)
write_test_requirements_file(reqs_test_file)
write_constraints_file(constraints)
return 0
if __name__ == "__main__":
_VAL = sys.argv[-1] == "validate"
sys.exit(main(_VAL))
| fbradyirl/home-assistant | script/gen_requirements_all.py | Python | apache-2.0 | 11,383 |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Source file annotation for coverage.py."""
import os
import re
from coverage.files import flat_rootname
from coverage.misc import ensure_dir, isolate_module
from coverage.report import get_analysis_to_report
os = isolate_module(os)
class AnnotateReporter:
"""Generate annotated source files showing line coverage.
This reporter creates annotated copies of the measured source files. Each
.py file is copied as a .py,cover file, with a left-hand margin annotating
each line::
> def h(x):
- if 0: #pragma: no cover
- pass
> if x == 1:
! a = 1
> else:
> a = 2
> h(2)
Executed lines use '>', lines not executed use '!', lines excluded from
consideration use '-'.
"""
def __init__(self, coverage):
self.coverage = coverage
self.config = self.coverage.config
self.directory = None
blank_re = re.compile(r"\s*(#|$)")
else_re = re.compile(r"\s*else\s*:\s*(#|$)")
def report(self, morfs, directory=None):
"""Run the report.
See `coverage.report()` for arguments.
"""
self.directory = directory
self.coverage.get_data()
for fr, analysis in get_analysis_to_report(self.coverage, morfs):
self.annotate_file(fr, analysis)
def annotate_file(self, fr, analysis):
"""Annotate a single file.
`fr` is the FileReporter for the file to annotate.
"""
statements = sorted(analysis.statements)
missing = sorted(analysis.missing)
excluded = sorted(analysis.excluded)
if self.directory:
ensure_dir(self.directory)
dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename()))
if dest_file.endswith("_py"):
dest_file = dest_file[:-3] + ".py"
dest_file += ",cover"
else:
dest_file = fr.filename + ",cover"
with open(dest_file, 'w', encoding='utf8') as dest:
i = 0
j = 0
covered = True
source = fr.source()
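# i tracks the sorted statement lines and j the sorted missing lines;
# both advance in step with lineno so each source line can be classified
# without searching the full lists.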
for lineno, line in enumerate(source.splitlines(True), start=1):
while i < len(statements) and statements[i] < lineno:
i += 1
while j < len(missing) and missing[j] < lineno:
j += 1
if i < len(statements) and statements[i] == lineno:
covered = j >= len(missing) or missing[j] > lineno
if self.blank_re.match(line):
dest.write(' ')
elif self.else_re.match(line):
# Special logic for lines containing only 'else:'.
if i >= len(statements) and j >= len(missing):
dest.write('! ')
elif i >= len(statements) or j >= len(missing):
dest.write('> ')
elif statements[i] == missing[j]:
dest.write('! ')
else:
dest.write('> ')
elif lineno in excluded:
dest.write('- ')
elif covered:
dest.write('> ')
else:
dest.write('! ')
dest.write(line)
| sonntagsgesicht/regtest | .aux/venv/lib/python3.9/site-packages/coverage/annotate.py | Python | apache-2.0 | 3,528 |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
import mock
from oslo_serialization import jsonutils as json
from tempest import clients
from tempest.cmd import init
from tempest.cmd import verify_tempest_config
from tempest.common import credentials_factory
from tempest import config
from tempest.lib.common import rest_client
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest.tests import base
from tempest.tests import fake_config
class TestGetAPIVersions(base.TestCase):
def test_remove_version_project(self):
f = verify_tempest_config._remove_version_project
self.assertEqual('/', f('/v2.1/%s/' % data_utils.rand_uuid_hex()))
self.assertEqual('', f('/v2.1/tenant_id'))
self.assertEqual('', f('/v3'))
self.assertEqual('/', f('/v3/'))
self.assertEqual('/something/', f('/something/v2.1/tenant_id/'))
self.assertEqual('/something', f('/something/v2.1/tenant_id'))
self.assertEqual('/something', f('/something/v3'))
self.assertEqual('/something/', f('/something/v3/'))
self.assertEqual('/', f('/')) # http://localhost/
self.assertEqual('', f('')) # http://localhost
def test_url_grab_versioned_nova_nossl(self):
base_url = 'http://127.0.0.1:8774/v2/'
endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
self.assertEqual('http://127.0.0.1:8774/', endpoint)
def test_url_grab_versioned_nova_ssl(self):
base_url = 'https://127.0.0.1:8774/v3/'
endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
self.assertEqual('https://127.0.0.1:8774/', endpoint)
def test_get_unversioned_endpoint_base(self):
base_url = 'https://127.0.0.1:5000/'
endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
self.assertEqual('https://127.0.0.1:5000/', endpoint)
def test_get_unversioned_endpoint_subpath(self):
base_url = 'https://127.0.0.1/identity/v3'
endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
self.assertEqual('https://127.0.0.1/identity', endpoint)
def test_get_unversioned_endpoint_subpath_trailing_solidus(self):
base_url = 'https://127.0.0.1/identity/v3/'
endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
self.assertEqual('https://127.0.0.1/identity/', endpoint)
class TestDiscovery(base.TestCase):
def setUp(self):
super(TestDiscovery, self).setUp()
self.useFixture(fake_config.ConfigFixture())
self.patchobject(config, 'TempestConfigPrivate',
fake_config.FakePrivate)
def test_get_keystone_api_versions(self):
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': {'values': [{'id': 'v2.0'}, {'id': 'v3.0'}]}}
fake_resp = json.dumps(fake_resp)
self.useFixture(fixtures.MockPatch(
'tempest.lib.common.http.ClosingHttp.request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
versions = verify_tempest_config._get_api_versions(fake_os, 'keystone')
self.assertIn('v2.0', versions)
self.assertIn('v3.0', versions)
def test_get_cinder_api_versions(self):
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v1.0'}, {'id': 'v2.0'}]}
fake_resp = json.dumps(fake_resp)
self.useFixture(fixtures.MockPatch(
'tempest.lib.common.http.ClosingHttp.request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
versions = verify_tempest_config._get_api_versions(fake_os, 'cinder')
self.assertIn('v1.0', versions)
self.assertIn('v2.0', versions)
def test_get_nova_versions(self):
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
fake_resp = json.dumps(fake_resp)
self.useFixture(fixtures.MockPatch(
'tempest.lib.common.http.ClosingHttp.request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
versions = verify_tempest_config._get_api_versions(fake_os, 'nova')
self.assertIn('v2.0', versions)
self.assertIn('v3.0', versions)
def test_get_versions_invalid_response(self):
# When the response body is not valid JSON, an error is logged.
mock_log_error = self.useFixture(fixtures.MockPatchObject(
verify_tempest_config.LOG, 'error')).mock
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint'))
# Simulated response is not JSON.
sample_body = (
'<html><head>Sample Response</head><body>This is the sample page '
'for the web server. Why are you requesting it?</body></html>')
self.useFixture(fixtures.MockPatch(
'tempest.lib.common.http.ClosingHttp.request',
return_value=(None, sample_body)))
# service value doesn't matter, just needs to match what
# _get_api_versions puts in its client_dict.
self.assertRaises(ValueError, verify_tempest_config._get_api_versions,
os=mock.MagicMock(), service='keystone')
self.assertTrue(mock_log_error.called)
def test_verify_api_versions(self):
api_services = ['cinder', 'glance', 'keystone']
fake_os = mock.MagicMock()
for svc in api_services:
m = 'verify_%s_api_versions' % svc
with mock.patch.object(verify_tempest_config, m) as verify_mock:
verify_tempest_config.verify_api_versions(fake_os, svc, True)
verify_mock.assert_called_once_with(fake_os, True)
def test_verify_api_versions_not_implemented(self):
api_services = ['cinder', 'glance', 'keystone']
fake_os = mock.MagicMock()
for svc in api_services:
m = 'verify_%s_api_versions' % svc
with mock.patch.object(verify_tempest_config, m) as verify_mock:
verify_tempest_config.verify_api_versions(fake_os, 'foo', True)
self.assertFalse(verify_mock.called)
@mock.patch('tempest.lib.common.http.ClosingHttp.request')
def test_verify_keystone_api_versions_no_v3(self, mock_request):
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': {'values': [{'id': 'v2.0'}]}}
fake_resp = json.dumps(fake_resp)
mock_request.return_value = (None, fake_resp)
fake_os = mock.MagicMock()
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_keystone_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v3',
'identity-feature-enabled',
False, True)
@mock.patch('tempest.lib.common.http.ClosingHttp.request')
def test_verify_cinder_api_versions_no_v3(self, mock_request):
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v2.0'}]}
fake_resp = json.dumps(fake_resp)
mock_request.return_value = (None, fake_resp)
fake_os = mock.MagicMock()
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_cinder_api_versions(fake_os, True)
print_mock.assert_any_call('api_v3', 'volume-feature-enabled',
False, True)
self.assertEqual(1, print_mock.call_count)
@mock.patch('tempest.lib.common.http.ClosingHttp.request')
def test_verify_cinder_api_versions_no_v2(self, mock_request):
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v3.0'}]}
fake_resp = json.dumps(fake_resp)
mock_request.return_value = (None, fake_resp)
fake_os = mock.MagicMock()
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_cinder_api_versions(fake_os, True)
print_mock.assert_any_call('api_v2', 'volume-feature-enabled',
False, True)
self.assertEqual(1, print_mock.call_count)
@mock.patch('tempest.lib.common.http.ClosingHttp.request')
def test_verify_cinder_api_versions_no_v1(self, mock_request):
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
fake_resp = json.dumps(fake_resp)
mock_request.return_value = (None, fake_resp)
fake_os = mock.MagicMock()
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_cinder_api_versions(fake_os, True)
print_mock.assert_not_called()
def test_verify_glance_version_no_v2_with_v1_1(self):
# This test verifies that the wrong config api_v2 = True is detected
class FakeClient(object):
def get_versions(self):
return (None, ['v1.1'])
fake_os = mock.MagicMock()
fake_module = mock.MagicMock()
fake_module.ImagesClient = FakeClient
fake_os.image_v1 = fake_module
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
print_mock.assert_called_with('api_v2', 'image-feature-enabled',
False, True)
def test_verify_glance_version_no_v2_with_v1_0(self):
# This test verifies that the wrong config api_v2 = True is detected
class FakeClient(object):
def get_versions(self):
return (None, ['v1.0'])
fake_os = mock.MagicMock()
fake_module = mock.MagicMock()
fake_module.ImagesClient = FakeClient
fake_os.image_v1 = fake_module
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
print_mock.assert_called_with('api_v2', 'image-feature-enabled',
False, True)
def test_verify_glance_version_no_v1(self):
# This test verifies that the wrong config api_v1 = True is detected
class FakeClient(object):
def get_versions(self):
raise lib_exc.NotFound()
def list_versions(self):
return {'versions': [{'id': 'v2.0'}]}
fake_os = mock.MagicMock()
fake_module = mock.MagicMock()
fake_module.ImagesClient = FakeClient
fake_module.VersionsClient = FakeClient
fake_os.image_v1 = fake_module
fake_os.image_v2 = fake_module
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
print_mock.assert_not_called()
def test_verify_glance_version_no_version(self):
# This test verifies that the wrong config api_v1 = True is detected
class FakeClient(object):
def get_versions(self):
raise lib_exc.NotFound()
def list_versions(self):
raise lib_exc.NotFound()
fake_os = mock.MagicMock()
fake_module = mock.MagicMock()
fake_module.ImagesClient = FakeClient
fake_module.VersionsClient = FakeClient
fake_os.image_v1 = fake_module
fake_os.image_v2 = fake_module
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
print_mock.assert_called_once_with('glance',
'service-available',
False, True)
def test_verify_extensions_neutron(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
fake_client = mock.MagicMock()
fake_client.list_extensions = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_extension_client',
return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os,
'neutron', {})
self.assertIn('neutron', results)
self.assertIn('fake1', results['neutron'])
self.assertTrue(results['neutron']['fake1'])
self.assertIn('fake2', results['neutron'])
self.assertTrue(results['neutron']['fake2'])
self.assertIn('fake3', results['neutron'])
self.assertFalse(results['neutron']['fake3'])
self.assertIn('not_fake', results['neutron'])
self.assertFalse(results['neutron']['not_fake'])
def test_verify_extensions_neutron_all(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
fake_client = mock.MagicMock()
fake_client.list_extensions = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_extension_client',
return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'neutron', {})
self.assertIn('neutron', results)
self.assertIn('extensions', results['neutron'])
self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
sorted(results['neutron']['extensions']))
def test_verify_extensions_neutron_none(self):
def fake_list_extensions():
return {'extensions': []}
fake_os = mock.MagicMock()
fake_client = mock.MagicMock()
fake_client.list_extensions = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_extension_client',
return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'neutron', {})
self.assertIn('neutron', results)
self.assertIn('extensions', results['neutron'])
self.assertEqual([], results['neutron']['extensions'])
def test_verify_extensions_cinder(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
fake_client = mock.MagicMock()
fake_client.list_extensions = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_extension_client',
return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os,
'cinder', {})
self.assertIn('cinder', results)
self.assertIn('fake1', results['cinder'])
self.assertTrue(results['cinder']['fake1'])
self.assertIn('fake2', results['cinder'])
self.assertTrue(results['cinder']['fake2'])
self.assertIn('fake3', results['cinder'])
self.assertFalse(results['cinder']['fake3'])
self.assertIn('not_fake', results['cinder'])
self.assertFalse(results['cinder']['not_fake'])
def test_verify_extensions_cinder_all(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
fake_client = mock.MagicMock()
fake_client.list_extensions = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_extension_client',
return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'cinder', {})
self.assertIn('cinder', results)
self.assertIn('extensions', results['cinder'])
self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
sorted(results['cinder']['extensions']))
def test_verify_extensions_cinder_none(self):
def fake_list_extensions():
return {'extensions': []}
fake_os = mock.MagicMock()
fake_client = mock.MagicMock()
fake_client.list_extensions = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_extension_client',
return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'cinder', {})
self.assertIn('cinder', results)
self.assertIn('extensions', results['cinder'])
self.assertEqual([], results['cinder']['extensions'])
def test_verify_extensions_nova(self):
def fake_list_extensions():
return ([{'alias': 'fake1'}, {'alias': 'fake2'},
{'alias': 'not_fake'}])
fake_os = mock.MagicMock()
fake_client = mock.MagicMock()
fake_client.list_extensions = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_extension_client',
return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os,
'nova', {})
self.assertIn('nova', results)
self.assertIn('fake1', results['nova'])
self.assertTrue(results['nova']['fake1'])
self.assertIn('fake2', results['nova'])
self.assertTrue(results['nova']['fake2'])
self.assertIn('fake3', results['nova'])
self.assertFalse(results['nova']['fake3'])
self.assertIn('not_fake', results['nova'])
self.assertFalse(results['nova']['not_fake'])
def test_verify_extensions_nova_all(self):
def fake_list_extensions():
return ({'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]})
fake_os = mock.MagicMock()
fake_client = mock.MagicMock()
fake_client.list_extensions = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_extension_client',
return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'nova', {})
self.assertIn('nova', results)
self.assertIn('extensions', results['nova'])
self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
sorted(results['nova']['extensions']))
def test_verify_extensions_nova_none(self):
def fake_list_extensions():
return ({'extensions': []})
fake_os = mock.MagicMock()
fake_client = mock.MagicMock()
fake_client.list_extensions = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_extension_client',
return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'nova', {})
self.assertIn('nova', results)
self.assertIn('extensions', results['nova'])
self.assertEqual([], results['nova']['extensions'])
def test_verify_extensions_swift(self):
def fake_list_extensions():
return {'fake1': 'metadata',
'fake2': 'metadata',
'not_fake': 'metadata',
'swift': 'metadata'}
fake_os = mock.MagicMock()
fake_client = mock.MagicMock()
fake_client.list_capabilities = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_extension_client',
return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os, 'swift', {})
self.assertIn('swift', results)
self.assertIn('fake1', results['swift'])
self.assertTrue(results['swift']['fake1'])
self.assertIn('fake2', results['swift'])
self.assertTrue(results['swift']['fake2'])
self.assertIn('fake3', results['swift'])
self.assertFalse(results['swift']['fake3'])
self.assertIn('not_fake', results['swift'])
self.assertFalse(results['swift']['not_fake'])
def test_verify_extensions_swift_all(self):
def fake_list_extensions():
return {'fake1': 'metadata',
'fake2': 'metadata',
'not_fake': 'metadata',
'swift': 'metadata'}
fake_os = mock.MagicMock()
fake_client = mock.MagicMock()
fake_client.list_capabilities = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_extension_client',
return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'swift', {})
self.assertIn('swift', results)
self.assertIn('extensions', results['swift'])
self.assertEqual(sorted(['not_fake', 'fake1', 'fake2']),
sorted(results['swift']['extensions']))
def test_verify_extensions_swift_none(self):
def fake_list_extensions():
return {'swift': 'metadata'}
fake_os = mock.MagicMock()
fake_client = mock.MagicMock()
fake_client.list_capabilities = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_extension_client',
return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'swift', {})
self.assertIn('swift', results)
self.assertIn('extensions', results['swift'])
self.assertEqual([], results['swift']['extensions'])
def test_get_extension_client(self):
creds = credentials_factory.get_credentials(
fill_in=False, username='fake_user', project_name='fake_project',
password='fake_password')
os = clients.Manager(creds)
for service in ['nova', 'neutron', 'swift', 'cinder']:
extensions_client = verify_tempest_config.get_extension_client(
os, service)
self.assertIsInstance(extensions_client, rest_client.RestClient)
def test_get_extension_client_sysexit(self):
creds = credentials_factory.get_credentials(
fill_in=False, username='fake_user', project_name='fake_project',
password='fake_password')
os = clients.Manager(creds)
self.assertRaises(SystemExit,
verify_tempest_config.get_extension_client,
os, 'fakeservice')
def test_get_config_file(self):
conf_dir = os.path.join(os.getcwd(), 'etc/')
conf_file = "tempest.conf.sample"
local_sample_conf_file = os.path.join(conf_dir, conf_file)
def fake_environ_get(key, default=None):
if key == 'TEMPEST_CONFIG_DIR':
return conf_dir
elif key == 'TEMPEST_CONFIG':
return 'tempest.conf.sample'
return default
with mock.patch('os.environ.get', side_effect=fake_environ_get,
autospec=True):
init_cmd = init.TempestInit(None, None)
init_cmd.generate_sample_config(os.path.join(conf_dir, os.pardir))
self.assertTrue(os.path.isfile(local_sample_conf_file),
local_sample_conf_file)
file_pointer = verify_tempest_config._get_config_file()
self.assertEqual(local_sample_conf_file, file_pointer.name)
with open(local_sample_conf_file, 'r+') as f:
local_sample_conf_contents = f.read()
self.assertEqual(local_sample_conf_contents, file_pointer.read())
if file_pointer:
file_pointer.close()
def test_print_and_or_update_true(self):
with mock.patch.object(
verify_tempest_config, 'change_option') as test_mock:
verify_tempest_config.print_and_or_update(
'fakeservice', 'fake-service-available', False, True)
test_mock.assert_called_once_with(
'fakeservice', 'fake-service-available', False)
def test_print_and_or_update_false(self):
with mock.patch.object(
verify_tempest_config, 'change_option') as test_mock:
verify_tempest_config.print_and_or_update(
'fakeservice', 'fake-service-available', False, False)
test_mock.assert_not_called()
def test_contains_version_positive_data(self):
self.assertTrue(
verify_tempest_config.contains_version('v1.', ['v1.0', 'v2.0']))
def test_contains_version_negative_data(self):
self.assertFalse(
verify_tempest_config.contains_version('v5.', ['v1.0', 'v2.0']))
| masayukig/tempest | tempest/tests/cmd/test_verify_tempest_config.py | Python | apache-2.0 | 29,474 |
import py_compile
import time
import marshal
import errno
import traceback
from os.path import join as path_join, isfile, isdir, getmtime
from translate import translate_file, translate_string, pystmts_to_string
from struct import unpack
from errors import TempyError, TempyImportError, TempyCompileError, TempyNativeCompileError
TEMPY_EXT = "tpy"
TEMPYC_EXT = "tpyc"
class TempyModule:
def __init__(self, name, env, _dir, _global=None):
self.__name__ = name
self.__env__ = env
self.__dir__ = _dir
self.__global__ = _global or {}
self.__submodule__ = {}
def __repr__(self):
return "<TempyModule %s at %s>"%(repr(self.__name__), self.__dir__)
def __getattr__(self, key):
try:
return self.__global__[key]
except KeyError:
raise AttributeError("%s has no attribute '%s'"%(repr(self), key))
class _Importer:
def __init__(self, env, current_module_name, visited):
self.env = env
self.current_module_name = current_module_name
self.visited = visited
def __call__(self, *names):
return self.env._module(names, self.visited, self.current_module_name)
def _write_code(filename, codeobject):
with open(filename, "wb") as fc:
fc.write('\0\0\0\0')
py_compile.wr_long(fc, long(time.time()))
marshal.dump(codeobject, fc)
fc.flush()
fc.seek(0, 0)
fc.write(py_compile.MAGIC)
def _naive_logger(x): print("[TempyEnvironmentLog]", x)
class CompileOption:
def __init__(self, use_tpyc=True, write_py=False, verbose=False, logger=_naive_logger):
self.use_tpyc = use_tpyc
self.write_py = write_py
self.verbose = verbose
self.logger = logger
def log(self, x):
if self.verbose:
self.logger(x)
class ModuleFetcher:
def __init__(self, systemdir=None, extradirs=None):
self.systemdir = systemdir
self.extradirs = extradirs or []
def _find(self, where, module_name):
file_path = path_join(where, module_name + "." + TEMPY_EXT)
if isfile(file_path):
return file_path
dir_path = path_join(where, module_name)
dir_init_path = path_join(where, module_name, "__init__" + "." + TEMPY_EXT)
if isdir(dir_path) and isfile(dir_init_path):
return dir_init_path
return None
def fetch_dir_by_name(self, pwd, module_name):
'''
Return a (tpy filepath, is_shared) pair for the given module_name.
If the module cannot be found, None is returned.
'''
first = self._find(pwd, module_name)
if first is not None:
return (first, False)
for where in [self.systemdir] + self.extradirs:
res = self._find(where, module_name)
if res is not None:
return (res, True)
return None
def _exchange_ext(s, new_ext):
rdot_idx = s.rfind(".")
if rdot_idx == -1:
return s + "." + new_ext
else:
return s[:rdot_idx] + "." + new_ext
class Environment:
def __init__(self, pwd, cache_module=True, main_name="__main__", module_fetcher=None, compile_option=None):
self.cache_module = cache_module
self.module_fetcher = module_fetcher or ModuleFetcher(pwd)
self.main_module = TempyModule(main_name, self, pwd)
self.shared_dict = {}
self.compile_option = compile_option if compile_option else CompileOption()
def _code_generation(self, tpy_path, tpyc_path, write_to_pyc=True):
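# Translate the .tpy source to Python, optionally write a .py copy for
# inspection, compile it, and (when requested) cache the resulting code
# object at tpyc_path.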
if self.compile_option.write_py:
py_path = _exchange_ext(tpyc_path, "py")
try:
with open(py_path, "w") as f:
f.write(pystmts_to_string(translate_file(tpy_path)))
except IOError as err:
self.compile_option.log("IOError occured while writing .py file(%s): %s"%(tpyc_path, str(err)))
code = compile_file(tpy_path)
if write_to_pyc:
try:
_write_code(tpyc_path, code)
except IOError as err:
self.compile_option.log("IOError occured while writing codeobject to .tpyc file(%s): %s"%(tpyc_path, str(err)))
return code
def _retrieve_code(self, tpy_path, tpyc_path):
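# A cached .tpyc follows the layout written by _write_code: a 4-byte
# magic number, a 4-byte modification timestamp, then the marshalled
# code object.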
if self.compile_option.use_tpyc:
if isfile(tpyc_path):
try:
f = open(tpyc_path, "rb")
magic_str = f.read(4)
if len(magic_str) < 4 or py_compile.MAGIC != magic_str:
return self._code_generation(tpy_path, tpyc_path)
timestamp_str = f.read(4)
if len(timestamp_str) < 4:
return self._code_generation(tpy_path, tpyc_path)
tpyc_timestamp = unpack("<I", timestamp_str)[0]
try:
tpy_timestamp = long(getmtime(tpy_path))
except IOError:
tpy_timestamp = 0
if tpyc_timestamp <= tpy_timestamp: # outdated
return self._code_generation(tpy_path, tpyc_path)
code = marshal.load(f)
return code
except IOError as err:
if err.errno == errno.ENOENT: # No such file
self.compile_option.log("Failed to locate .pyc file(%s) even though It was assured that it should be present"%tpyc_path)
return self._code_generation(tpy_path, tpyc_path)
else:
raise
finally:
f.close()
else:
return self._code_generation(tpy_path, tpyc_path)
else:
return self._code_generation(tpy_path, tpyc_path, write_to_pyc=False)
def _import(self, parent_module, module_name, visited=None, invoker_module_name=None):
if module_name in parent_module.__submodule__:
return parent_module.__submodule__[module_name]
elif module_name in self.shared_dict:
return self.shared_dict[module_name]
else:
if visited is None:
visited = set()
pair = self.module_fetcher.fetch_dir_by_name(parent_module.__dir__, module_name)
if pair is None:
raise TempyImportError("No such module named '%s'"%module_name)
tpy_path, is_shared = pair
tpyc_path = _exchange_ext(tpy_path, TEMPYC_EXT)
try:
code = self._retrieve_code(tpy_path, tpyc_path)
except TempyError:
raise
except Exception as error:
err_info = str(error)
err_msg = "Cannot import the module named '%s': %s\n%s"%(module_name, err_info, traceback.format_exc())
raise TempyImportError(err_msg)
else:
lcl = {} # local
gbl = {} # global
exec(code, gbl, lcl)
if is_shared:
current_module_name = module_name
else:
current_module_name = parent_module.__name__ + "." + module_name
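# 'visited' carries the chain of module names already being imported on
# this path; seeing the current name again means a circular dependency.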
if current_module_name in visited:
raise TempyImportError("circular dependency: in module '%s', tried to import '%s'"%(invoker_module_name, module_name))
exec_result = lcl['__tempy_main__'](None,
_Importer(self,
current_module_name,
visited.union([current_module_name])
),
None)
mod = TempyModule(current_module_name, self, path_join(parent_module.__dir__, module_name), exec_result)
if self.cache_module:
if is_shared:
self.shared_dict[module_name] = mod
else:
parent_module.__submodule__[module_name] = mod
return mod
def _module(self, names, visited=None, invoker_module_name=None):
iter_module = self.main_module
invoker_module_name = invoker_module_name or self.main_module.__name__
for module_name in names:
iter_module = self._import(iter_module, module_name, visited, invoker_module_name)
return iter_module
def module(self, dotted_str):
return self._module(dotted_str.split("."))
def _compile_kont(stmts, filename):
src = pystmts_to_string(stmts)
try:
code = compile(src, filename, "exec")
return code
except SyntaxError as error:
raise TempyNativeCompileError(error.args)
def compile_string(path, filename="<string>"):
'''
compile a tempy source string into a Python code object
'''
stmts = translate_string(path, filename=filename)
return _compile_kont(stmts, filename)
def compile_file(path, filename=None):
'''
compile a tempy file into a Python code object
'''
if filename is None:
filename = path
stmts = translate_file(path, filename=filename)
return _compile_kont(stmts, filename)
| Algy/tempy | tempy/env.py | Python | apache-2.0 | 9,404 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for Twitter on iOS 8+ plugin."""
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import twitter_ios
from tests.parsers.sqlite_plugins import test_lib
class TwitterIOSTest(test_lib.SQLitePluginTestCase):
"""Tests for Twitter on iOS 8+ database plugin."""
def testProcess(self):
"""Test the Process function on a Twitter iOS file."""
plugin = twitter_ios.TwitterIOSPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['twitter_ios.db'], plugin)
# We should have 184 events in total.
# * 25 Contacts creation events.
# * 25 Contacts update events.
# * 67 Status creation events.
# * 67 Status update events.
self.assertEqual(storage_writer.number_of_events, 184)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
# Test the first contact creation event.
expected_event_values = {
'data_type': 'twitter:ios:contact',
'date_time': '2007-04-22 14:42:37',
'description': (
'Breaking news alerts and updates from the BBC. For news, '
'features, analysis follow @BBCWorld (international) or @BBCNews '
'(UK). Latest sport news @BBCSport.'),
'followers_count': 19466932,
'following': 0,
'following_count': 3,
'location': 'London, UK',
'name': 'BBC Breaking News',
'profile_url': (
'https://pbs.twimg.com/profile_images/460740982498013184/'
'wIPwMwru_normal.png'),
'screen_name': 'BBCBreaking',
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION,
'url': 'http://www.bbc.co.uk/news'}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Test a contact modification event.
expected_event_values = {
'data_type': 'twitter:ios:contact',
'date_time': '2015-12-02 15:35:44',
'description': (
'Breaking news alerts and updates from the BBC. For news, '
'features, analysis follow @BBCWorld (international) or @BBCNews '
'(UK). Latest sport news @BBCSport.'),
'followers_count': 19466932,
'following': 0,
'following_count': 3,
'location': 'London, UK',
'name': 'BBC Breaking News',
'profile_url': (
'https://pbs.twimg.com/profile_images/'
'460740982498013184/wIPwMwru_normal.png'),
'screen_name': 'BBCBreaking',
'timestamp_desc': definitions.TIME_DESCRIPTION_UPDATE,
'url': 'http://www.bbc.co.uk/news'}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
# Test a status creation event.
expected_event_values = {
'data_type': 'twitter:ios:status',
'date_time': '2014-09-11 11:46:16',
'favorite_count': 3,
'favorited': 0,
'name': 'Heather Mahalik',
'retweet_count': 2,
'text': 'Never forget. http://t.co/L7bjWue1A2',
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION,
'user_id': 475222380}
self.CheckEventValues(storage_writer, events[50], expected_event_values)
# Test a status update event.
expected_event_values = {
'data_type': 'twitter:ios:status',
'date_time': '2015-12-02 15:39:37',
'favorite_count': 3,
'favorited': 0,
'name': 'Heather Mahalik',
'retweet_count': 2,
'text': 'Never forget. http://t.co/L7bjWue1A2',
'timestamp_desc': definitions.TIME_DESCRIPTION_UPDATE,
'user_id': 475222380}
self.CheckEventValues(storage_writer, events[51], expected_event_values)
if __name__ == '__main__':
unittest.main()
| kiddinn/plaso | tests/parsers/sqlite_plugins/twitter_ios.py | Python | apache-2.0 | 3,854 |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=1, faild_point=100000, path_list=[
[TestAction.create_mini_vm, 'vm1', 'cluster=cluster2'],
[TestAction.reboot_vm, 'vm1'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup1'],
[TestAction.create_mini_vm, 'vm2', 'cpu=random', 'cluster=cluster1'],
[TestAction.migrate_vm, 'vm1'],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.create_volume, 'volume1', 'size=random', 'cluster=cluster2', 'flag=scsi'],
[TestAction.create_volume, 'volume2', 'cluster=cluster2', 'flag=thick,scsi'],
[TestAction.add_image, 'image1', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.delete_vm_backup, 'vm1-backup1'],
[TestAction.delete_image, 'image1'],
[TestAction.recover_image, 'image1'],
[TestAction.delete_image, 'image1'],
[TestAction.expunge_image, 'image1'],
[TestAction.start_vm, 'vm1'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup2'],
[TestAction.stop_vm, 'vm1'],
[TestAction.change_vm_ha, 'vm2'],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.resize_data_volume, 'volume1', 5*1024*1024],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.detach_volume, 'volume1'],
[TestAction.create_volume, 'volume3', 'cluster=cluster1', 'flag=scsi'],
[TestAction.delete_volume, 'volume3'],
[TestAction.create_volume, 'volume4', 'cluster=cluster1', 'flag=scsi'],
[TestAction.attach_volume, 'vm2', 'volume4'],
[TestAction.create_volume_backup, 'volume4', 'volume4-backup3'],
[TestAction.change_vm_ha, 'vm2'],
[TestAction.stop_vm, 'vm2'],
[TestAction.use_volume_backup, 'volume4-backup3'],
[TestAction.start_vm, 'vm2'],
[TestAction.change_vm_ha, 'vm2'],
[TestAction.create_mini_vm, 'vm3', 'memory=random', 'cluster=cluster2'],
[TestAction.delete_volume, 'volume1'],
[TestAction.expunge_volume, 'volume1'],
[TestAction.destroy_vm, 'vm2'],
[TestAction.recover_vm, 'vm2'],
[TestAction.start_vm, 'vm1'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup4'],
[TestAction.stop_vm, 'vm1'],
[TestAction.create_image_from_volume, 'vm1', 'vm1-image2'],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.attach_volume, 'vm2', 'volume4'],
[TestAction.start_vm, 'vm2'],
[TestAction.create_volume_backup, 'volume4', 'volume4-backup5'],
[TestAction.stop_vm, 'vm2'],
[TestAction.use_volume_backup, 'volume4-backup5'],
])
'''
The final status:
Running:[]
Stopped:['vm1', 'vm3', 'vm2']
Enabled:['vm1-backup2', 'volume4-backup3', 'vm1-backup4', 'volume4-backup5', 'vm1-image2']
attached:['volume4']
Detached:['volume2']
Deleted:['volume3', 'vm1-backup1']
Expunged:['volume1', 'image1']
Ha:[]
Group:
vm_backup2:['vm1-backup4']---vm1@
vm_backup1:['vm1-backup2']---vm1@
''' | zstackio/zstack-woodpecker | integrationtest/vm/mini/multiclusters/paths/multi_path88.py | Python | apache-2.0 | 2,871 |
from django.db.models import Q
from beetle.models import PrincipalGroup, GatewayGroup
from .models import Rule, RuleException
def __get_applicable_rules(from_gateway, from_principal, to_gateway,
to_principal, timestamp=None):
"""Get the queryset of rules that apply for the mapping."""
to_principal_groups = PrincipalGroup.objects.filter(
members__in=[to_principal])
from_principal_groups = PrincipalGroup.objects.filter(
members__in=[from_principal])
to_gateway_groups = GatewayGroup.objects.filter(
members__in=[to_gateway])
from_gateway_groups = GatewayGroup.objects.filter(
members__in=[from_gateway])
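# A rule applies when each of its four endpoints either names the entity
# directly, names one of the entity's groups, or is the "*" wildcard.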
rules = Rule.objects.filter(
Q(from_principal=from_principal) | Q(from_principal__name="*") |
Q(from_principal__in=from_principal_groups),
Q(from_gateway=from_gateway) | Q(from_gateway__name="*") |
Q(from_gateway__in=from_gateway_groups),
Q(to_principal=to_principal) | Q(to_principal__name="*") |
Q(to_principal__in=to_principal_groups),
Q(to_gateway=to_gateway) | Q(to_gateway__name="*") |
Q(to_gateway__in=to_gateway_groups),
active=True)
rules = rules.filter(start__lte=timestamp, expire__gte=timestamp) \
| rules.filter(expire__isnull=True)
return rules
def __get_rule_exceptions(rules, from_gateway, from_principal, to_gateway,
to_principal):
"""Get the queryset of rule ids that should be excluded."""
to_principal_groups = PrincipalGroup.objects.filter(
members__in=[to_principal])
from_principal_groups = PrincipalGroup.objects.filter(
members__in=[from_principal])
to_gateway_groups = GatewayGroup.objects.filter(
members__in=[to_gateway])
from_gateway_groups = GatewayGroup.objects.filter(
members__in=[from_gateway])
return RuleException.objects.filter(
Q(from_principal=from_principal) | Q(from_principal__name="*") |
Q(from_principal__in=from_principal_groups),
Q(from_gateway=from_gateway) | Q(from_gateway__name="*") |
Q(from_gateway__in=from_gateway_groups),
Q(to_principal=to_principal) | Q(to_principal__name="*") |
Q(to_principal__in=to_principal_groups),
Q(to_gateway=to_gateway) | Q(to_gateway__name="*") |
Q(to_gateway__in=to_gateway_groups),
rule__in=rules).values_list("rule_id", flat=True).distinct()
def query_can_map_static(from_gateway, from_principal, to_gateway, to_principal,
timestamp):
"""Returns all of the static rules that allow a mapping."""
rules = __get_applicable_rules(from_gateway, from_principal,
to_gateway, to_principal, timestamp)
exceptions = __get_rule_exceptions(rules, from_gateway, from_principal,
to_gateway, to_principal)
rules = rules.exclude(id__in=exceptions)
return rules.exists(), rules
| helena-project/beetle | controller/access/lookup.py | Python | apache-2.0 | 2,657 |
#!/usr/bin/env python
# encoding:utf-8
#
#
#
__Author__ = "CORDEA"
__date__ = "2014-08-21"
from collections import Counter
infile = open("pred_info.vcf", "r")
lines = infile.readlines()
infile.close()
# 0001a56c,chr1,5088617,2169089,44.2448979592,G,092a8efa7cd6427c91900bded95cab33,A,4:37:7:12,14:36:24:23,0:54:5:2,0:88:0:0,2:61:19:15,3:54:14:14,0:96:0:1,2:42:10:12,23:27:25:25,2:54:18:15,0:7:2:5,7:5:20:16,14:27:24:24,5:28:13:9
outFile = open("readID.lst", "w")
c1 = 0
c2 = 0
varDict = {}
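# Each line looks like the sample above: tmp[3] is the record ID, tmp[4] a
# score kept in varDict, and every field from tmp[8] on holds per-population
# counts formatted "a:b:c:d". The share of the first count in each field's
# total is used as that population's frequency; IDs whose populations show
# exactly two distinct frequencies are written to readID.lst.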
for line in lines:
tmp = line.split(",")
ID = tmp[3]
varDict[ID] = float(tmp[4])
pops = [r.rstrip("\r\n") for r in tmp[8:]]
freqList = []
for pop in pops:
tmp = [int(i) for i in pop.split(":")]
SUM = sum(tmp)
freqList.append(tmp[0] / float(SUM))
ctr = len(Counter(freqList))
if ctr == 2:
outFile.write(str(ID) + "\n")
c2 += 1
elif ctr == 1:
c1 += 1
count = 1
for k, v in sorted(varDict.items(), key=lambda x:x[1], reverse=True):
#if count <= 1000:
#outFile.write(str(k) + "," + str(v) + "\n")
count += 1
outFile.close()
print c1
print c2
| CORDEA/analysis_of_1000genomes-data | programs/machine_learning/extract_pred.py | Python | apache-2.0 | 1,133 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import functools
import os
import re
import shutil
import subprocess
import sys
from pants.backend.codegen.targets.python_thrift_library import PythonThriftLibrary
from pants.backend.python.code_generator import CodeGenerator
from pants.base.build_environment import get_buildroot
from pants.thrift_util import select_thrift_binary
from pants.util.dirutil import safe_mkdir, safe_walk
from pants.util.strutil import ensure_binary
class PythonThriftBuilder(CodeGenerator):
"""Code Generator a Python code from thrift IDL files."""
class UnknownPlatformException(CodeGenerator.Error):
def __init__(self, platform):
super(PythonThriftBuilder.UnknownPlatformException, self).__init__(
'Unknown platform: %s!' % str(platform))
def __init__(self, target, root_dir, config, target_suffix=None):
super(PythonThriftBuilder, self).__init__(target, root_dir, config, target_suffix=target_suffix)
self._workdir = os.path.join(config.getdefault('pants_workdir'), 'thrift', 'py-thrift')
@property
def install_requires(self):
return ['thrift']
def run_thrifts(self):
"""
Generate Python thrift code using thrift compiler specified in pants config.
Thrift fields conflicting with Python keywords are suffixed with a trailing
underscore (e.g.: from_).
"""
def is_py_thrift(target):
return isinstance(target, PythonThriftLibrary)
all_thrifts = set()
def collect_sources(target):
abs_target_base = os.path.join(get_buildroot(), target.target_base)
for source in target.payload.sources.relative_to_buildroot():
source_root_relative_source = os.path.relpath(source, abs_target_base)
all_thrifts.add((target.target_base, source_root_relative_source))
self.target.walk(collect_sources, predicate=is_py_thrift)
copied_sources = set()
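# Copy every collected .thrift source into the shared work directory,
# preserving its source-root-relative path so cross-file includes resolve
# via the -I flag passed to the thrift compiler.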
for base, relative_source in all_thrifts:
abs_source = os.path.join(base, relative_source)
copied_source = os.path.join(self._workdir, relative_source)
safe_mkdir(os.path.dirname(copied_source))
shutil.copyfile(abs_source, copied_source)
copied_sources.add(copied_source)
for src in copied_sources:
if not self._run_thrift(src):
raise PythonThriftBuilder.CodeGenerationException("Could not generate .py from %s!" % src)
def _run_thrift(self, source):
args = [
select_thrift_binary(self.config),
'--gen',
'py:new_style',
'-o', self.codegen_root,
'-I', self._workdir,
os.path.abspath(source)]
po = subprocess.Popen(args, cwd=self.chroot.path())
rv = po.wait()
if rv != 0:
comm = po.communicate()
print('thrift generation failed!', file=sys.stderr)
print('STDOUT', file=sys.stderr)
print(comm[0], file=sys.stderr)
print('STDERR', file=sys.stderr)
print(comm[1], file=sys.stderr)
return rv == 0
@property
def package_dir(self):
return "gen-py"
def generate(self):
# auto-generate the python files that we bundle up
self.run_thrifts()
# Thrift generates code with all parent namespaces with empty __init__.py's. Generally
# speaking we want to drop anything w/o an __init__.py, and for anything with an __init__.py,
# we want to explicitly make it a namespace package, hence the hoops here.
for root, _, files in safe_walk(os.path.normpath(self.package_root)):
reldir = os.path.relpath(root, self.package_root)
if reldir == '.': # skip root
continue
if '__init__.py' not in files: # skip non-packages
continue
init_py_abspath = os.path.join(root, '__init__.py')
module_path = self.path_to_module(reldir)
self.created_packages.add(module_path)
if os.path.getsize(init_py_abspath) == 0: # empty __init__, translate to namespace package
with open(init_py_abspath, 'wb') as f:
f.write(b"__import__('pkg_resources').declare_namespace(__name__)")
self.created_namespace_packages.add(module_path)
else:
# non-empty __init__, this is a leaf package, usually with ttypes and constants, leave as-is
pass
if not self.created_packages:
raise self.CodeGenerationException('No Thrift structures declared in %s!' % self.target)
| tejal29/pants | src/python/pants/backend/python/thrift_builder.py | Python | apache-2.0 | 4,568 |
"""
Django settings for laboite_erp project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$=ti6ye=h$ytww*)nfylrc9onq!@_(86#1k#s7tuy(4!9_!7+z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'laboite_erp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'laboite_erp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| laboiteproject/laboite-erp | laboite_erp/settings.py | Python | apache-2.0 | 3,111 |
#!/usr/bin/python
"""postprocess"""
import argparse
import ruamel.yaml
import os
def read(filename):
"""return file contents"""
with open(filename, 'r') as file_in:
return file_in.read()
def write(filename, cwl):
"""write to file"""
with open(filename, 'w') as file_out:
file_out.write(cwl)
def main():
"""main function"""
parser = argparse.ArgumentParser(description='postprocess')
parser.add_argument(
'-f',
action="store",
dest="filename_cwl",
help='Name of the cwl file',
required=True
)
params = parser.parse_args()
dir_path = os.path.dirname(os.path.realpath(__file__))
cwl = ruamel.yaml.load(read(params.filename_cwl),
ruamel.yaml.RoundTripLoader)
script_path = os.path.join(dir_path,'filter_vardict.py')
cwl['baseCommand'] = ['python',script_path]
cwl['inputs']['inputVcf']['type'] = ['string', 'File']
cwl['inputs']['hotspotVcf']['type'] = ['null', 'string', 'File']
write(params.filename_cwl, ruamel.yaml.dump(
cwl, Dumper=ruamel.yaml.RoundTripDumper))
if __name__ == "__main__":
main() | rhshah/basicfiltering | vardict/postprocess.py | Python | apache-2.0 | 1,171 |
#!/usr/bin/env python
import SimpleITK as sitk
import skimage as ski
import skimage.segmentation
import numpy as np
import timeit
def mask_label_contour(image, seg):
"""Combine an image and segmentation by masking the segmentation contour.
For an input image (scalar or vector), and a multi-label
    segmentation image, creates an output image where the contour of
each label masks the input image to black."""
return sitk.Mask(image, sitk.LabelContour(seg+1)==0)
# this script generates images to compare the scikit-image SLIC
# implementation vs ours.
# We have slightly different parameterizations. The image is 512x512;
# if we target 256 superpixels of size 32x32 we have similar
# parameters for each implementation.
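# Quick sanity check of that correspondence (illustrative only, not used by the
# comparison below): a 512x512 image tiled into 32x32 superpixels yields
# (512 // 32) ** 2 == 256 segments, so n_segments=256 for scikit-image matches
# the [32, 32] grid size passed to SimpleITK's SLIC.
expected_segments = (512 // 32) ** 2
assert expected_segments == 256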
img=sitk.ReadImage("/home/blowekamp/src/scikit-image/skimage/data/astronaut.png")
aimg_lab=ski.color.rgb2lab(sitk.GetArrayFromImage(img))
ski_slic_aimg=skimage.segmentation.slic(aimg_lab,n_segments=256,convert2lab=False)
sitk.WriteImage(mask_label_contour(img, sitk.GetImageFromArray(ski_slic_aimg))
, "astronaut_ski_slic.png")
print(min(timeit.repeat(lambda: skimage.segmentation.slic(aimg_lab,n_segments=256,convert2lab=False), number=1, repeat=5)))
img_lab = sitk.GetImageFromArray(aimg_lab, isVector=True)
sitk_slic_img=sitk.SLIC(img_lab, [32,32], maximumNumberOfIterations=10)
sitk.WriteImage(mask_label_contour(img, sitk_slic_img), "astronaut_sitk_slic.png")
print(min(timeit.repeat(lambda: sitk.SLIC(img_lab, [32,32], maximumNumberOfIterations=10), number=1, repeat=5)))
| blowekamp/itkSuperPixel | doc/scripts/evaluation_ski.py | Python | apache-2.0 | 1,526 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Contains the logic for `aq add interface --network_device`."""
from aquilon.exceptions_ import ArgumentError
from aquilon.aqdb.model import NetworkDevice
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.interface import (get_or_create_interface,
check_netdev_iftype)
from aquilon.worker.processes import DSDBRunner
class CommandAddInterfaceNetworkDevice(BrokerCommand):
requires_plenaries = True
required_parameters = ["interface", "network_device", "iftype"]
invalid_parameters = ["automac", "pg", "autopg", "model", "vendor",
"bus_address"]
def render(self, session, logger, plenaries, interface, network_device,
mac, iftype, comments, **arguments):
for arg in self.invalid_parameters:
if arguments.get(arg) is not None:
raise ArgumentError("Cannot use argument --%s when adding an "
"interface to a network device." % arg)
check_netdev_iftype(iftype)
dbnetdev = NetworkDevice.get_unique(session, network_device, compel=True)
oldinfo = DSDBRunner.snapshot_hw(dbnetdev)
get_or_create_interface(session, dbnetdev, name=interface, mac=mac,
interface_type=iftype, comments=comments,
preclude=True)
session.flush()
plenaries.add(dbnetdev)
plenaries.add(dbnetdev.host)
with plenaries.transaction():
dsdb_runner = DSDBRunner(logger=logger)
dsdb_runner.update_host(dbnetdev, oldinfo)
dsdb_runner.commit_or_rollback("Could not update network device in DSDB")
return
| guillaume-philippon/aquilon | lib/aquilon/worker/commands/add_interface_network_device.py | Python | apache-2.0 | 2,483 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Iterable
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.utils.decorators import apply_defaults
class S3ListOperator(BaseOperator):
"""
List all objects from the bucket with the given string prefix in name.
This operator returns a python list with the name of objects which can be
used by `xcom` in the downstream task.
:param bucket: The S3 bucket where to find the objects. (templated)
:type bucket: str
    :param prefix: Prefix string to filter the objects whose names begin with
        this prefix. (templated)
:type prefix: str
:param delimiter: the delimiter marks key hierarchy. (templated)
:type delimiter: str
:param aws_conn_id: The connection ID to use when connecting to S3 storage.
:type aws_conn_id: str
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:type verify: bool or str
**Example**:
The following operator would list all the files
(excluding subfolders) from the S3
``customers/2018/04/`` key in the ``data`` bucket. ::
s3_file = S3ListOperator(
task_id='list_3s_files',
bucket='data',
prefix='customers/2018/04/',
delimiter='/',
aws_conn_id='aws_customers_conn'
)
"""
template_fields = ('bucket', 'prefix', 'delimiter') # type: Iterable[str]
ui_color = '#ffd700'
@apply_defaults
def __init__(self,
bucket,
prefix='',
delimiter='',
aws_conn_id='aws_default',
verify=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.bucket = bucket
self.prefix = prefix
self.delimiter = delimiter
self.aws_conn_id = aws_conn_id
self.verify = verify
def execute(self, context):
hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
self.log.info(
            'Getting the list of files from bucket: %s in prefix: %s (Delimiter: %s)',
self.bucket, self.prefix, self.delimiter
)
return hook.list_keys(
bucket_name=self.bucket,
prefix=self.prefix,
delimiter=self.delimiter)
| Fokko/incubator-airflow | airflow/contrib/operators/s3_list_operator.py | Python | apache-2.0 | 3,690 |
from processor import Processor
file_name = "program_samples\hello.hex"
processor = Processor(file_name)
processor.load()
processor.run()
if __name__ == '__main__':
pass
| AlexLitvino/i8080_simulator | runner.py | Python | apache-2.0 | 177 |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from tripleo_common._i18n import _
LOG = logging.getLogger(__name__)
class CloudConfigException(Exception):
"""Base tripleo-common exception
To correctly use this class, inherit from it and define
    a 'msg_fmt' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("An unknown exception occurred.")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception('Exception in string format operation')
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
# at least get the core message out if something happened
message = self.msg_fmt
super(CloudConfigException, self).__init__(message)
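# A hypothetical subclass sketch (class name, message text and keyword are
# illustrative only, not part of this module) showing the kwargs-based
# formatting performed in __init__ above:
#     class MissingStackName(CloudConfigException):
#         msg_fmt = _("Heat stack %(stack)s has no name set.")
#     raise MissingStackName(stack='overcloud')
#     # -> "Heat stack overcloud has no name set."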
class MissingEnvironment(CloudConfigException):
message = "Required environment variables are not set."
| jprovaznik/tripleo-common | tripleo_common/exception.py | Python | apache-2.0 | 1,750 |
#!/usr/local/bin/python2.7
# -*- coding: utf-8 -*-
__author__ = 'https://github.com/password123456/'
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import requests
import urllib
import urllib2
import json
import datetime
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def get_recent_lotto():
url = 'http://www.nlotto.co.kr/common.do?method=getLottoNumber'
try:
r = requests.get(url)
data = json.loads(r.text)
except Exception, e:
print '%s[-] Exception::%s%s' % (bcolors.WARNING, e, bcolors.ENDC)
sys.exit(0)
else:
r.close()
drwNoDate = data['drwNoDate']
drwNo = data['drwNo']
firstWinamnt = data['firstWinamnt']
drwtNo1 = data['drwtNo1']
drwtNo2 = data['drwtNo2']
drwtNo3 = data['drwtNo3']
drwtNo4 = data['drwtNo4']
drwtNo5 = data['drwtNo5']
drwtNo6 = data['drwtNo6']
bnusNo = data['bnusNo']
        # Format the winning amount with thousands separators
firstWinamnt = format(firstWinamnt, ',')
        content = '** Latest lotto results **\n'
        content = content + ' [+] Lotto : http://www.nlotto.co.kr\n [+] Draw date: %s\n [+] Draw no.: %d\n [+] Winning numbers: %d %d %d %d %d %d\n [+] Bonus: %d \n [*] First prize: %s KRW\n' % (drwNoDate, drwNo, drwtNo1, drwtNo2, drwtNo3, drwtNo4, drwtNo5, drwtNo6, bnusNo, firstWinamnt )
#print content
return content
content = get_recent_lotto()
def main():
get_recent_lotto()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit(0)
except Exception, e:
print '%s[-] Exception::%s%s' % (bcolors.WARNING, e, bcolors.ENDC)
| password123456/lotto | get_recent_lotto.py | Python | apache-2.0 | 1,787 |
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from lxml import etree
from keystone.logic.types import fault
class User(object):
"""Document me!"""
def __init__(self, password=None, id=None, name=None, tenant_id=None,
email=None, enabled=None, tenant_roles=None):
self.id = id
self.name = name
self.tenant_id = tenant_id
self.password = password
self.email = email
self.enabled = enabled and True or False
self.tenant_roles = tenant_roles
@staticmethod
def from_xml(xml_str):
try:
dom = etree.Element("root")
dom.append(etree.fromstring(xml_str))
root = dom.find("{http://docs.openstack.org/identity/api/v2.0}" \
"user")
if root == None:
raise fault.BadRequestFault("Expecting User")
name = root.get("name")
tenant_id = root.get("tenantId")
email = root.get("email")
password = root.get("password")
enabled = root.get("enabled")
if not name:
raise fault.BadRequestFault("Expecting User")
elif not password:
raise fault.BadRequestFault("Expecting User password")
elif not email:
raise fault.BadRequestFault("Expecting User email")
enabled = enabled is None or enabled.lower() in ["true", "yes"]
return User(password, id, name, tenant_id, email, enabled)
except etree.LxmlError as e:
raise fault.BadRequestFault("Cannot parse User", str(e))
@staticmethod
def from_json(json_str):
try:
obj = json.loads(json_str)
if not "user" in obj:
raise fault.BadRequestFault("Expecting User")
user = obj["user"]
id = user.get('id', None)
name = user.get('name', None)
if not "password" in user:
raise fault.BadRequestFault("Expecting User Password")
password = user["password"]
if (id == None or len(id.strip()) == 0) and (
name == None or len(name.strip()) == 0):
raise fault.BadRequestFault("Expecting User")
elif password == None or len(password.strip()) == 0:
raise fault.BadRequestFault("Expecting User password")
if "tenantId" in user:
tenant_id = user["tenantId"]
else:
tenant_id = None
if "email" not in user:
raise fault.BadRequestFault("Expecting User Email")
email = user["email"]
if "enabled" in user:
set_enabled = user["enabled"]
if not isinstance(set_enabled, bool):
raise fault.BadRequestFault("Bad enabled attribute!")
else:
set_enabled = True
return User(password, id, name, tenant_id, email, set_enabled)
except (ValueError, TypeError) as e:
raise fault.BadRequestFault("Cannot parse Tenant", str(e))
def to_dom(self):
dom = etree.Element("user",
xmlns="http://docs.openstack.org/identity/api/v2.0")
if self.email:
dom.set("email", unicode(self.email))
if self.tenant_id:
dom.set("tenantId", unicode(self.tenant_id))
if self.id:
dom.set("id", unicode(self.id))
if self.name:
dom.set("name", unicode(self.name))
if self.enabled:
dom.set("enabled", unicode(self.enabled).lower())
if self.password:
dom.set("password", unicode(self.password))
if self.tenant_roles:
dom_roles = etree.Element("tenantRoles")
for role in self.tenant_roles:
dom_role = etree.Element("tenantRole")
dom_role.text = role
dom_roles.append(dom_role)
dom.append(dom_roles)
return dom
def to_xml(self):
return etree.tostring(self.to_dom())
def to_dict(self):
user = {}
if self.id:
user["id"] = unicode(self.id)
if self.name:
user["name"] = unicode(self.name)
if self.tenant_id:
user["tenantId"] = unicode(self.tenant_id)
if self.password:
user["password"] = unicode(self.password)
user["email"] = unicode(self.email)
user["enabled"] = self.enabled
if self.tenant_roles:
user["tenantRoles"] = list(self.tenant_roles)
return {'user': user}
def to_json(self):
return json.dumps(self.to_dict())
class User_Update(object):
"""Document me!"""
def __init__(self, password=None, id=None, name=None, tenant_id=None,
email=None, enabled=None):
self.id = id
self.name = name
self.tenant_id = tenant_id
self.password = password
self.email = email
self.enabled = bool(enabled) if enabled is not None else None
@staticmethod
def from_xml(xml_str):
try:
dom = etree.Element("root")
dom.append(etree.fromstring(xml_str))
root = dom.find("{http://docs.openstack.org/identity/api/v2.0}" \
"user")
if root == None:
raise fault.BadRequestFault("Expecting User")
id = root.get("id")
name = root.get("name")
tenant_id = root.get("tenantId")
email = root.get("email")
password = root.get("password")
enabled = root.get("enabled")
if enabled == None or enabled == "true" or enabled == "yes":
set_enabled = True
elif enabled == "false" or enabled == "no":
set_enabled = False
else:
raise fault.BadRequestFault("Bad enabled attribute!")
# TODO: WTF is this?!
if password == '':
password = id
return User(password=password, id=id, name=name,
tenant_id=tenant_id, email=email, enabled=set_enabled)
except etree.LxmlError as e:
raise fault.BadRequestFault("Cannot parse User", str(e))
@staticmethod
def from_json(json_str):
try:
obj = json.loads(json_str)
if not "user" in obj:
raise fault.BadRequestFault("Expecting User")
user = obj["user"]
id = user.get('id', None)
name = user.get('name', None)
password = user.get('password', None)
tenant_id = user.get('tenantId', None)
email = user.get('email', None)
enabled = user.get('enabled', True)
if not isinstance(enabled, bool):
raise fault.BadRequestFault("Bad enabled attribute!")
# TODO: WTF is this?!
if password == '':
password = id
return User(password, id, name, tenant_id, email, enabled)
except (ValueError, TypeError) as e:
raise fault.BadRequestFault("Cannot parse Tenant", str(e))
def to_dom(self):
dom = etree.Element("user",
xmlns="http://docs.openstack.org/identity/api/v2.0")
if self.email:
dom.set("email", unicode(self.email))
if self.tenant_id:
dom.set("tenantId", unicode(self.tenant_id))
if self.id:
dom.set("id", unicode(self.id))
if self.name:
dom.set("name", unicode(self.name))
if self.enabled is not None:
dom.set("enabled", unicode(self.enabled).lower())
if self.password:
dom.set("password", unicode(self.password))
return dom
def to_xml(self):
return etree.tostring(self.to_dom())
def to_dict(self):
user = {}
if self.id:
user["id"] = unicode(self.id)
if self.name:
user["name"] = unicode(self.name)
if self.tenant_id:
user["tenantId"] = unicode(self.tenant_id)
if self.password:
user["password"] = unicode(self.password)
if self.email:
user["email"] = unicode(self.email)
if self.enabled is not None:
user["enabled"] = self.enabled
return {'user': user}
def to_json(self):
return json.dumps(self.to_dict())
class Users(object):
"""A collection of users."""
def __init__(self, values, links):
self.values = values
self.links = links
def to_xml(self):
dom = etree.Element("users")
dom.set(u"xmlns", "http://docs.openstack.org/identity/api/v2.0")
for t in self.values:
dom.append(t.to_dom())
for t in self.links:
dom.append(t.to_dom())
return etree.tostring(dom)
def to_json(self):
values = [t.to_dict()["user"] for t in self.values]
links = [t.to_dict()["links"] for t in self.links]
return json.dumps({"users": values, "users_links": links})
| genius1611/Keystone | keystone/logic/types/user.py | Python | apache-2.0 | 9,678 |
# Copyright (c) 2015 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
from sqlalchemy import Column, ForeignKey, CheckConstraint, \
PrimaryKeyConstraint, func, or_, and_, true, literal_column, \
select, cast, TEXT
from sqlalchemy.orm import relationship, backref, Query
from sqlalchemy.types import BigInteger, Enum, Integer
from pycroft.model.base import ModelBase
from pycroft.model.ddl import DDLManager, Function, Trigger, View
from pycroft.model.types import DateTimeTz
from pycroft.model.user import User
from pycroft.model.host import IP, Host, Interface
ddl = DDLManager()
class TrafficEvent:
timestamp = Column(DateTimeTz, server_default=func.current_timestamp(), nullable=False)
amount = Column(BigInteger, CheckConstraint('amount >= 0'),
nullable=False)
class TrafficVolume(TrafficEvent, ModelBase):
__table_args__ = (
PrimaryKeyConstraint('ip_id', 'type', 'timestamp'),
)
type = Column(Enum("Ingress", "Egress", name="traffic_direction"),
nullable=False)
ip_id = Column(Integer, ForeignKey(IP.id, ondelete="CASCADE"),
nullable=False, index=True)
ip = relationship(IP, backref=backref("traffic_volumes",
cascade="all, delete-orphan",
cascade_backrefs=False))
user_id = Column(Integer, ForeignKey(User.id, ondelete='CASCADE'),
nullable=True, index=True)
user = relationship(User,
backref=backref("traffic_volumes",
cascade="all, delete-orphan",
cascade_backrefs=False),
uselist=False)
packets = Column(Integer, CheckConstraint('packets >= 0'),
nullable=False)
TrafficVolume.__table__.add_is_dependent_on(IP.__table__)
pmacct_traffic_egress = View(
name='pmacct_traffic_egress',
query=(
Query([])
.add_columns(TrafficVolume.packets.label('packets'),
TrafficVolume.amount.label('bytes'),
TrafficVolume.timestamp.label('stamp_inserted'),
TrafficVolume.timestamp.label('stamp_updated'),
IP.address.label('ip_src'))
.select_from(TrafficVolume)
.filter_by(type='Egress')
.join(IP)
.statement # turns our `Selectable` into something compilable
),
)
ddl.add_view(TrafficVolume.__table__, pmacct_traffic_egress)
pmacct_expression_replacements = dict(
tv_tname=TrafficVolume.__tablename__,
tv_type=TrafficVolume.type.key,
tv_ip_id=TrafficVolume.ip_id.key,
tv_timestamp=TrafficVolume.timestamp.key,
tv_amount=TrafficVolume.amount.key,
tv_packets=TrafficVolume.packets.key,
tv_user_id=TrafficVolume.user_id.key,
ip_tname=IP.__tablename__,
ip_id=str(IP.id.expression),
ip_interface_id=str(IP.interface_id.expression),
ip_address=str(IP.address.expression),
host_tname=Host.__tablename__,
host_id=str(Host.id.expression),
host_owner_id=str(Host.owner_id.expression),
interface_tname=Interface.__tablename__,
interface_id=str(Interface.id.expression),
interface_host_id=str(Interface.host_id.expression),
)
pmacct_egress_upsert = Function(
name="pmacct_traffic_egress_insert", arguments=[], language="plpgsql", rtype="trigger",
definition="""BEGIN
INSERT INTO traffic_volume ({tv_type}, {tv_ip_id}, "{tv_timestamp}", {tv_amount}, {tv_packets}, {tv_user_id})
SELECT
'Egress',
{ip_id},
date_trunc('day', NEW.stamp_inserted),
NEW.bytes,
NEW.packets,
{host_owner_id}
FROM {ip_tname}
JOIN {interface_tname} ON {ip_interface_id} = {interface_id}
JOIN {host_tname} ON {interface_host_id} = {host_id}
WHERE NEW.ip_src = {ip_address}
ON CONFLICT ({tv_ip_id}, {tv_type}, "{tv_timestamp}")
DO UPDATE SET ({tv_amount}, {tv_packets}) = ({tv_tname}.{tv_amount} + NEW.bytes,
{tv_tname}.{tv_packets} + NEW.packets);
RETURN NULL;
END;""".format(**pmacct_expression_replacements),
)
pmacct_egress_upsert_trigger = Trigger(
name='pmacct_traffic_egress_insert_trigger', table=pmacct_traffic_egress.table,
events=["INSERT"], function_call="pmacct_traffic_egress_insert()", when="INSTEAD OF"
)
ddl.add_function(TrafficVolume.__table__, pmacct_egress_upsert)
ddl.add_trigger(TrafficVolume.__table__, pmacct_egress_upsert_trigger)
pmacct_traffic_ingress = View(
name='pmacct_traffic_ingress',
query=(
Query([])
.add_columns(TrafficVolume.packets.label('packets'),
TrafficVolume.amount.label('bytes'),
TrafficVolume.timestamp.label('stamp_inserted'),
TrafficVolume.timestamp.label('stamp_updated'),
IP.address.label('ip_dst'))
.select_from(TrafficVolume)
.filter_by(type='Ingress')
.join(IP)
.statement # turns our `Selectable` into something compilable
),
)
ddl.add_view(TrafficVolume.__table__, pmacct_traffic_ingress)
pmacct_ingress_upsert = Function(
name="pmacct_traffic_ingress_insert", arguments=[], language="plpgsql", rtype="trigger",
definition="""BEGIN
INSERT INTO traffic_volume ({tv_type}, {tv_ip_id}, "{tv_timestamp}", {tv_amount}, {tv_packets}, {tv_user_id})
SELECT
'Ingress',
{ip_id},
date_trunc('day', NEW.stamp_inserted),
NEW.bytes,
NEW.packets,
{host_owner_id}
FROM {ip_tname}
JOIN {interface_tname} ON {ip_interface_id} = {interface_id}
JOIN {host_tname} ON {interface_host_id} = {host_id}
WHERE NEW.ip_dst = {ip_address}
ON CONFLICT ({tv_ip_id}, {tv_type}, "{tv_timestamp}")
DO UPDATE SET ({tv_amount}, {tv_packets}) = ({tv_tname}.{tv_amount} + NEW.bytes,
{tv_tname}.{tv_packets} + NEW.packets);
RETURN NULL;
END;""".format(**pmacct_expression_replacements),
)
pmacct_ingress_upsert_trigger = Trigger(
name='pmacct_traffic_ingress_insert_trigger', table=pmacct_traffic_ingress.table,
events=["INSERT"], function_call="pmacct_traffic_ingress_insert()", when="INSTEAD OF"
)
ddl.add_function(TrafficVolume.__table__, pmacct_ingress_upsert)
ddl.add_trigger(TrafficVolume.__table__, pmacct_ingress_upsert_trigger)
def traffic_history_query():
events = (select(func.sum(TrafficVolume.amount).label('amount'),
literal_column('day'),
cast(TrafficVolume.type, TEXT).label('type')
)
.select_from(
func.generate_series(
func.date_trunc('day', literal_column('arg_start')),
func.date_trunc('day', literal_column('arg_end')),
'1 day'
).alias('day')
.outerjoin(TrafficVolume.__table__, and_(
func.date_trunc('day', TrafficVolume.timestamp) == literal_column('day'),
TrafficVolume.user_id == literal_column('arg_user_id'))
)
)
.group_by(literal_column('day'), literal_column('type'))
).cte()
events_ingress = select(events).where(or_(events.c.type == 'Ingress', events.c.type == None)).cte()
events_egress = select(events).where(or_(events.c.type == 'Egress', events.c.type == None)).cte()
hist = (select(func.coalesce(events_ingress.c.day, events_egress.c.day).label('timestamp'),
events_ingress.c.amount.label('ingress'),
events_egress.c.amount.label('egress'))
.select_from(events_ingress.join(events_egress,
events_ingress.c.day == events_egress.c.day,
full=true))
.order_by(literal_column('timestamp'))
)
return hist
traffic_history_function = Function(
'traffic_history', ['arg_user_id int', 'arg_start timestamptz', 'arg_end timestamptz'],
'TABLE ("timestamp" timestamptz, ingress numeric, egress numeric)',
definition=traffic_history_query(),
volatility='stable',
)
ddl.add_function(
TrafficVolume.__table__,
traffic_history_function
)
class TrafficHistoryEntry:
def __init__(self, timestamp, ingress, egress):
self.timestamp = timestamp
self.ingress = ingress or 0
self.egress = egress or 0
def __repr__(self):
return str(self.__dict__)
ddl.register()
| agdsn/pycroft | pycroft/model/traffic.py | Python | apache-2.0 | 8,996 |
#!/usr/bin/env python
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run xDS integration tests on GCP using Traffic Director."""
import argparse
import datetime
import json
import logging
import os
import random
import shlex
import socket
import subprocess
import sys
import tempfile
import time
import uuid
from google.protobuf import json_format
import googleapiclient.discovery
import grpc
from oauth2client.client import GoogleCredentials
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
from src.proto.grpc.health.v1 import health_pb2
from src.proto.grpc.health.v1 import health_pb2_grpc
from src.proto.grpc.testing import empty_pb2
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import test_pb2_grpc
# Envoy protos provided by PyPI package xds-protos
# Needs to import the generated Python file to load descriptors
try:
from envoy.extensions.filters.common.fault.v3 import fault_pb2
from envoy.extensions.filters.http.fault.v3 import fault_pb2
from envoy.extensions.filters.http.router.v3 import router_pb2
from envoy.extensions.filters.network.http_connection_manager.v3 import \
http_connection_manager_pb2
from envoy.service.status.v3 import csds_pb2
from envoy.service.status.v3 import csds_pb2_grpc
except ImportError:
# These protos are required by CSDS test. We should not fail the entire
# script for one test case.
pass
logger = logging.getLogger()
console_handler = logging.StreamHandler()
formatter = logging.Formatter(fmt='%(asctime)s: %(levelname)-8s %(message)s')
console_handler.setFormatter(formatter)
logger.handlers = []
logger.addHandler(console_handler)
logger.setLevel(logging.WARNING)
# Suppress excessive logs for gRPC Python
original_grpc_trace = os.environ.pop('GRPC_TRACE', None)
original_grpc_verbosity = os.environ.pop('GRPC_VERBOSITY', None)
# Suppress not-essential logs for GCP clients
logging.getLogger('google_auth_httplib2').setLevel(logging.WARNING)
logging.getLogger('googleapiclient.discovery').setLevel(logging.WARNING)
_TEST_CASES = [
'backends_restart',
'change_backend_service',
'gentle_failover',
'load_report_based_failover',
'ping_pong',
'remove_instance_group',
'round_robin',
'secondary_locality_gets_no_requests_on_partial_primary_failure',
'secondary_locality_gets_requests_on_primary_failure',
'traffic_splitting',
'path_matching',
'header_matching',
'api_listener',
'forwarding_rule_port_match',
'forwarding_rule_default_port',
'metadata_filter',
]
# Valid test cases, but not included in 'all'. These tests can only be run
# manually, and aren't enabled automatically for all languages.
#
# TODO: Move them into _TEST_CASES when support is ready in all languages.
_ADDITIONAL_TEST_CASES = [
'circuit_breaking',
'timeout',
'fault_injection',
'csds',
]
# Test cases that require the V3 API. Skipped in older runs.
_V3_TEST_CASES = frozenset(['timeout', 'fault_injection', 'csds'])
# Test cases that require the alpha API. Skipped for stable API runs.
_ALPHA_TEST_CASES = frozenset(['timeout'])
def parse_test_cases(arg):
if arg == '':
return []
arg_split = arg.split(',')
test_cases = set()
all_test_cases = _TEST_CASES + _ADDITIONAL_TEST_CASES
for arg in arg_split:
if arg == "all":
test_cases = test_cases.union(_TEST_CASES)
else:
test_cases = test_cases.union([arg])
if not all([test_case in all_test_cases for test_case in test_cases]):
raise Exception('Failed to parse test cases %s' % arg)
    # Preserve order.
return [x for x in all_test_cases if x in test_cases]
def parse_port_range(port_arg):
try:
port = int(port_arg)
return list(range(port, port + 1))
except:
port_min, port_max = port_arg.split(':')
return list(range(int(port_min), int(port_max) + 1))
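# A couple of illustrative checks of parse_port_range (both endpoints of a
# min:max range are inclusive); the port numbers here are examples only:
assert parse_port_range('8080') == [8080]
assert parse_port_range('8080:8082') == [8080, 8081, 8082]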
argp = argparse.ArgumentParser(description='Run xDS interop tests on GCP')
# TODO(zdapeng): remove default value of project_id and project_num
argp.add_argument('--project_id', default='grpc-testing', help='GCP project id')
argp.add_argument('--project_num',
default='830293263384',
help='GCP project number')
argp.add_argument(
'--gcp_suffix',
default='',
help='Optional suffix for all generated GCP resource names. Useful to '
'ensure distinct names across test runs.')
argp.add_argument(
'--test_case',
default='ping_pong',
type=parse_test_cases,
help='Comma-separated list of test cases to run. Available tests: %s, '
'(or \'all\' to run every test). '
'Alternative tests not included in \'all\': %s' %
(','.join(_TEST_CASES), ','.join(_ADDITIONAL_TEST_CASES)))
argp.add_argument(
'--bootstrap_file',
default='',
help='File to reference via GRPC_XDS_BOOTSTRAP. Disables built-in '
'bootstrap generation')
argp.add_argument(
'--xds_v3_support',
default=False,
action='store_true',
help='Support xDS v3 via GRPC_XDS_EXPERIMENTAL_V3_SUPPORT. '
'If a pre-created bootstrap file is provided via the --bootstrap_file '
'parameter, it should include xds_v3 in its server_features field.')
argp.add_argument(
'--client_cmd',
default=None,
help='Command to launch xDS test client. {server_uri}, {stats_port} and '
'{qps} references will be replaced using str.format(). GRPC_XDS_BOOTSTRAP '
'will be set for the command')
argp.add_argument(
'--client_hosts',
default=None,
help='Comma-separated list of hosts running client processes. If set, '
'--client_cmd is ignored and client processes are assumed to be running on '
'the specified hosts.')
argp.add_argument('--zone', default='us-central1-a')
argp.add_argument('--secondary_zone',
default='us-west1-b',
help='Zone to use for secondary TD locality tests')
argp.add_argument('--qps', default=100, type=int, help='Client QPS')
argp.add_argument(
'--wait_for_backend_sec',
default=1200,
type=int,
help='Time limit for waiting for created backend services to report '
    'healthy when launching or updating GCP resources')
argp.add_argument(
'--use_existing_gcp_resources',
default=False,
action='store_true',
help=
'If set, find and use already created GCP resources instead of creating new'
' ones.')
argp.add_argument(
'--keep_gcp_resources',
default=False,
action='store_true',
help=
'Leave GCP VMs and configuration running after test. Default behavior is '
'to delete when tests complete.')
argp.add_argument('--halt_after_fail',
action='store_true',
help='Halt and save the resources when test failed.')
argp.add_argument(
'--compute_discovery_document',
default=None,
type=str,
help=
'If provided, uses this file instead of retrieving via the GCP discovery '
'API')
argp.add_argument(
'--alpha_compute_discovery_document',
default=None,
type=str,
help='If provided, uses this file instead of retrieving via the alpha GCP '
'discovery API')
argp.add_argument('--network',
default='global/networks/default',
help='GCP network to use')
_DEFAULT_PORT_RANGE = '8080:8280'
argp.add_argument('--service_port_range',
default=_DEFAULT_PORT_RANGE,
type=parse_port_range,
help='Listening port for created gRPC backends. Specified as '
'either a single int or as a range in the format min:max, in '
'which case an available port p will be chosen s.t. min <= p '
'<= max')
argp.add_argument(
'--stats_port',
default=8079,
type=int,
help='Local port for the client process to expose the LB stats service')
argp.add_argument('--xds_server',
default='trafficdirector.googleapis.com:443',
help='xDS server')
argp.add_argument('--source_image',
default='projects/debian-cloud/global/images/family/debian-9',
help='Source image for VMs created during the test')
argp.add_argument('--path_to_server_binary',
default=None,
type=str,
help='If set, the server binary must already be pre-built on '
'the specified source image')
argp.add_argument('--machine_type',
default='e2-standard-2',
help='Machine type for VMs created during the test')
argp.add_argument(
'--instance_group_size',
default=2,
type=int,
help='Number of VMs to create per instance group. Certain test cases (e.g., '
'round_robin) may not give meaningful results if this is set to a value '
'less than 2.')
argp.add_argument('--verbose',
help='verbose log output',
default=False,
action='store_true')
# TODO(ericgribkoff) Remove this param once the sponge-formatted log files are
# visible in all test environments.
argp.add_argument('--log_client_output',
help='Log captured client output',
default=False,
action='store_true')
# TODO(ericgribkoff) Remove this flag once all test environments are verified to
# have access to the alpha compute APIs.
argp.add_argument('--only_stable_gcp_apis',
help='Do not use alpha compute APIs. Some tests may be '
'incompatible with this option (gRPC health checks are '
                  'currently alpha and required for simulating server failure)',
default=False,
action='store_true')
args = argp.parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
CLIENT_HOSTS = []
if args.client_hosts:
CLIENT_HOSTS = args.client_hosts.split(',')
# Each config propagation in the control plane should finish within 600s.
# Otherwise, it indicates a bug in the control plane. Config propagation
# includes all kinds of traffic config updates, like updating the urlMap, creating
# the resources for the first time, updating BackendService, and changing the
# status of endpoints in BackendService.
_WAIT_FOR_URL_MAP_PATCH_SEC = 600
# In general, fetching load balancing stats only takes ~10s. However, slow
# config update could lead to empty EDS or similar symptoms causing the
# connection to hang for a long period of time. So, we want to extend the stats
# wait time to be the same as urlMap patch time.
_WAIT_FOR_STATS_SEC = _WAIT_FOR_URL_MAP_PATCH_SEC
_DEFAULT_SERVICE_PORT = 80
_WAIT_FOR_BACKEND_SEC = args.wait_for_backend_sec
_WAIT_FOR_OPERATION_SEC = 1200
_INSTANCE_GROUP_SIZE = args.instance_group_size
_NUM_TEST_RPCS = 10 * args.qps
_CONNECTION_TIMEOUT_SEC = 60
_GCP_API_RETRIES = 5
_BOOTSTRAP_TEMPLATE = """
{{
"node": {{
"id": "{node_id}",
"metadata": {{
"TRAFFICDIRECTOR_NETWORK_NAME": "%s",
"com.googleapis.trafficdirector.config_time_trace": "TRUE"
}},
"locality": {{
"zone": "%s"
}}
}},
"xds_servers": [{{
"server_uri": "%s",
"channel_creds": [
{{
"type": "google_default",
"config": {{}}
}}
],
"server_features": {server_features}
}}]
}}""" % (args.network.split('/')[-1], args.zone, args.xds_server)
# TODO(ericgribkoff) Add change_backend_service to this list once TD no longer
# sends an update with no localities when adding the MIG to the backend service
# can race with the URL map patch.
_TESTS_TO_FAIL_ON_RPC_FAILURE = ['ping_pong', 'round_robin']
# Tests that run UnaryCall and EmptyCall.
_TESTS_TO_RUN_MULTIPLE_RPCS = ['path_matching', 'header_matching']
# Tests that make UnaryCall with test metadata.
_TESTS_TO_SEND_METADATA = ['header_matching']
_TEST_METADATA_KEY = 'xds_md'
_TEST_METADATA_VALUE_UNARY = 'unary_yranu'
_TEST_METADATA_VALUE_EMPTY = 'empty_ytpme'
# Extra RPC metadata whose value is a number, sent with UnaryCall only.
_TEST_METADATA_NUMERIC_KEY = 'xds_md_numeric'
_TEST_METADATA_NUMERIC_VALUE = '159'
_PATH_MATCHER_NAME = 'path-matcher'
_BASE_TEMPLATE_NAME = 'test-template'
_BASE_INSTANCE_GROUP_NAME = 'test-ig'
_BASE_HEALTH_CHECK_NAME = 'test-hc'
_BASE_FIREWALL_RULE_NAME = 'test-fw-rule'
_BASE_BACKEND_SERVICE_NAME = 'test-backend-service'
_BASE_URL_MAP_NAME = 'test-map'
_BASE_SERVICE_HOST = 'grpc-test'
_BASE_TARGET_PROXY_NAME = 'test-target-proxy'
_BASE_FORWARDING_RULE_NAME = 'test-forwarding-rule'
_TEST_LOG_BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../reports')
_SPONGE_LOG_NAME = 'sponge_log.log'
_SPONGE_XML_NAME = 'sponge_log.xml'
def get_client_stats(num_rpcs, timeout_sec):
if CLIENT_HOSTS:
hosts = CLIENT_HOSTS
else:
hosts = ['localhost']
for host in hosts:
with grpc.insecure_channel('%s:%d' %
(host, args.stats_port)) as channel:
stub = test_pb2_grpc.LoadBalancerStatsServiceStub(channel)
request = messages_pb2.LoadBalancerStatsRequest()
request.num_rpcs = num_rpcs
request.timeout_sec = timeout_sec
rpc_timeout = timeout_sec + _CONNECTION_TIMEOUT_SEC
logger.debug('Invoking GetClientStats RPC to %s:%d:', host,
args.stats_port)
response = stub.GetClientStats(request,
wait_for_ready=True,
timeout=rpc_timeout)
logger.debug('Invoked GetClientStats RPC to %s: %s', host,
json_format.MessageToJson(response))
return response
def get_client_accumulated_stats():
if CLIENT_HOSTS:
hosts = CLIENT_HOSTS
else:
hosts = ['localhost']
for host in hosts:
with grpc.insecure_channel('%s:%d' %
(host, args.stats_port)) as channel:
stub = test_pb2_grpc.LoadBalancerStatsServiceStub(channel)
request = messages_pb2.LoadBalancerAccumulatedStatsRequest()
logger.debug('Invoking GetClientAccumulatedStats RPC to %s:%d:',
host, args.stats_port)
response = stub.GetClientAccumulatedStats(
request, wait_for_ready=True, timeout=_CONNECTION_TIMEOUT_SEC)
logger.debug('Invoked GetClientAccumulatedStats RPC to %s: %s',
host, response)
return response
def get_client_xds_config_dump():
if CLIENT_HOSTS:
hosts = CLIENT_HOSTS
else:
hosts = ['localhost']
for host in hosts:
server_address = '%s:%d' % (host, args.stats_port)
with grpc.insecure_channel(server_address) as channel:
stub = csds_pb2_grpc.ClientStatusDiscoveryServiceStub(channel)
logger.debug('Fetching xDS config dump from %s', server_address)
response = stub.FetchClientStatus(csds_pb2.ClientStatusRequest(),
wait_for_ready=True,
timeout=_CONNECTION_TIMEOUT_SEC)
logger.debug('Fetched xDS config dump from %s', server_address)
if len(response.config) != 1:
logger.error('Unexpected number of ClientConfigs %d: %s',
len(response.config), response)
return None
else:
# Converting the ClientStatusResponse into JSON, because many
# fields are packed in google.protobuf.Any. It will require many
# duplicated code to unpack proto message and inspect values.
return json_format.MessageToDict(
response.config[0], preserving_proto_field_name=True)
def configure_client(rpc_types, metadata=[], timeout_sec=None):
if CLIENT_HOSTS:
hosts = CLIENT_HOSTS
else:
hosts = ['localhost']
for host in hosts:
with grpc.insecure_channel('%s:%d' %
(host, args.stats_port)) as channel:
stub = test_pb2_grpc.XdsUpdateClientConfigureServiceStub(channel)
request = messages_pb2.ClientConfigureRequest()
request.types.extend(rpc_types)
for rpc_type, md_key, md_value in metadata:
md = request.metadata.add()
md.type = rpc_type
md.key = md_key
md.value = md_value
if timeout_sec:
request.timeout_sec = timeout_sec
logger.debug(
'Invoking XdsUpdateClientConfigureService RPC to %s:%d: %s',
host, args.stats_port, request)
stub.Configure(request,
wait_for_ready=True,
timeout=_CONNECTION_TIMEOUT_SEC)
logger.debug('Invoked XdsUpdateClientConfigureService RPC to %s',
host)
class RpcDistributionError(Exception):
pass
def _verify_rpcs_to_given_backends(backends, timeout_sec, num_rpcs,
allow_failures):
start_time = time.time()
error_msg = None
logger.debug('Waiting for %d sec until backends %s receive load' %
(timeout_sec, backends))
while time.time() - start_time <= timeout_sec:
error_msg = None
stats = get_client_stats(num_rpcs, timeout_sec)
rpcs_by_peer = stats.rpcs_by_peer
for backend in backends:
if backend not in rpcs_by_peer:
error_msg = 'Backend %s did not receive load' % backend
break
if not error_msg and len(rpcs_by_peer) > len(backends):
error_msg = 'Unexpected backend received load: %s' % rpcs_by_peer
if not allow_failures and stats.num_failures > 0:
error_msg = '%d RPCs failed' % stats.num_failures
if not error_msg:
return
raise RpcDistributionError(error_msg)
def wait_until_all_rpcs_go_to_given_backends_or_fail(backends,
timeout_sec,
num_rpcs=_NUM_TEST_RPCS):
_verify_rpcs_to_given_backends(backends,
timeout_sec,
num_rpcs,
allow_failures=True)
def wait_until_all_rpcs_go_to_given_backends(backends,
timeout_sec,
num_rpcs=_NUM_TEST_RPCS):
_verify_rpcs_to_given_backends(backends,
timeout_sec,
num_rpcs,
allow_failures=False)
def wait_until_no_rpcs_go_to_given_backends(backends, timeout_sec):
start_time = time.time()
while time.time() - start_time <= timeout_sec:
stats = get_client_stats(_NUM_TEST_RPCS, timeout_sec)
error_msg = None
rpcs_by_peer = stats.rpcs_by_peer
for backend in backends:
if backend in rpcs_by_peer:
error_msg = 'Unexpected backend %s receives load' % backend
break
if not error_msg:
return
raise Exception('Unexpected RPCs going to given backends')
def wait_until_rpcs_in_flight(rpc_type, timeout_sec, num_rpcs, threshold):
'''Block until the test client reaches the state with the given number
of RPCs being outstanding stably.
Args:
rpc_type: A string indicating the RPC method to check for. Either
'UnaryCall' or 'EmptyCall'.
timeout_sec: Maximum number of seconds to wait until the desired state
is reached.
num_rpcs: Expected number of RPCs to be in-flight.
threshold: Number within [0,100], the tolerable percentage by which
the actual number of RPCs in-flight can differ from the expected number.
'''
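    # For example (illustrative numbers): num_rpcs=100 with threshold=10 accepts
    # any stable in-flight count within the inclusive band [90, 110].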
if threshold < 0 or threshold > 100:
raise ValueError('Value error: Threshold should be between 0 to 100')
threshold_fraction = threshold / 100.0
start_time = time.time()
error_msg = None
logger.debug(
'Waiting for %d sec until %d %s RPCs (with %d%% tolerance) in-flight' %
(timeout_sec, num_rpcs, rpc_type, threshold))
while time.time() - start_time <= timeout_sec:
error_msg = _check_rpcs_in_flight(rpc_type, num_rpcs, threshold,
threshold_fraction)
if error_msg:
logger.debug('Progress: %s', error_msg)
time.sleep(2)
else:
break
# Ensure the number of outstanding RPCs is stable.
if not error_msg:
time.sleep(5)
error_msg = _check_rpcs_in_flight(rpc_type, num_rpcs, threshold,
threshold_fraction)
if error_msg:
raise Exception("Wrong number of %s RPCs in-flight: %s" %
(rpc_type, error_msg))
def _check_rpcs_in_flight(rpc_type, num_rpcs, threshold, threshold_fraction):
error_msg = None
stats = get_client_accumulated_stats()
rpcs_started = stats.num_rpcs_started_by_method[rpc_type]
rpcs_succeeded = stats.num_rpcs_succeeded_by_method[rpc_type]
rpcs_failed = stats.num_rpcs_failed_by_method[rpc_type]
rpcs_in_flight = rpcs_started - rpcs_succeeded - rpcs_failed
if rpcs_in_flight < (num_rpcs * (1 - threshold_fraction)):
error_msg = ('actual(%d) < expected(%d - %d%%)' %
(rpcs_in_flight, num_rpcs, threshold))
elif rpcs_in_flight > (num_rpcs * (1 + threshold_fraction)):
error_msg = ('actual(%d) > expected(%d + %d%%)' %
(rpcs_in_flight, num_rpcs, threshold))
return error_msg
def compare_distributions(actual_distribution, expected_distribution,
threshold):
"""Compare if two distributions are similar.
Args:
actual_distribution: A list of floats, contains the actual distribution.
expected_distribution: A list of floats, contains the expected distribution.
threshold: Number within [0,100], the threshold percentage by which the
actual distribution can differ from the expected distribution.
Returns:
The similarity between the distributions as a boolean. Returns true if the
actual distribution lies within the threshold of the expected
distribution, false otherwise.
Raises:
      ValueError: if threshold is not within [0,100].
Exception: containing detailed error messages.
"""
if len(expected_distribution) != len(actual_distribution):
raise Exception(
'Error: expected and actual distributions have different size (%d vs %d)'
% (len(expected_distribution), len(actual_distribution)))
if threshold < 0 or threshold > 100:
raise ValueError('Value error: Threshold should be between 0 to 100')
threshold_fraction = threshold / 100.0
for expected, actual in zip(expected_distribution, actual_distribution):
if actual < (expected * (1 - threshold_fraction)):
raise Exception("actual(%f) < expected(%f-%d%%)" %
(actual, expected, threshold))
if actual > (expected * (1 + threshold_fraction)):
raise Exception("actual(%f) > expected(%f+%d%%)" %
(actual, expected, threshold))
return True
def compare_expected_instances(stats, expected_instances):
"""Compare if stats have expected instances for each type of RPC.
Args:
stats: LoadBalancerStatsResponse reported by interop client.
expected_instances: a dict with key as the RPC type (string), value as
the expected backend instances (list of strings).
Returns:
Returns true if the instances are expected. False if not.
"""
for rpc_type, expected_peers in list(expected_instances.items()):
rpcs_by_peer_for_type = stats.rpcs_by_method[rpc_type]
rpcs_by_peer = rpcs_by_peer_for_type.rpcs_by_peer if rpcs_by_peer_for_type else None
logger.debug('rpc: %s, by_peer: %s', rpc_type, rpcs_by_peer)
peers = list(rpcs_by_peer.keys())
if set(peers) != set(expected_peers):
logger.info('unexpected peers for %s, got %s, want %s', rpc_type,
peers, expected_peers)
return False
return True
def test_backends_restart(gcp, backend_service, instance_group):
logger.info('Running test_backends_restart')
instance_names = get_instance_names(gcp, instance_group)
num_instances = len(instance_names)
start_time = time.time()
wait_until_all_rpcs_go_to_given_backends(instance_names,
_WAIT_FOR_STATS_SEC)
try:
resize_instance_group(gcp, instance_group, 0)
wait_until_all_rpcs_go_to_given_backends_or_fail([],
_WAIT_FOR_BACKEND_SEC)
finally:
resize_instance_group(gcp, instance_group, num_instances)
wait_for_healthy_backends(gcp, backend_service, instance_group)
new_instance_names = get_instance_names(gcp, instance_group)
wait_until_all_rpcs_go_to_given_backends(new_instance_names,
_WAIT_FOR_BACKEND_SEC)
def test_change_backend_service(gcp, original_backend_service, instance_group,
alternate_backend_service,
same_zone_instance_group):
logger.info('Running test_change_backend_service')
original_backend_instances = get_instance_names(gcp, instance_group)
alternate_backend_instances = get_instance_names(gcp,
same_zone_instance_group)
patch_backend_service(gcp, alternate_backend_service,
[same_zone_instance_group])
wait_for_healthy_backends(gcp, original_backend_service, instance_group)
wait_for_healthy_backends(gcp, alternate_backend_service,
same_zone_instance_group)
wait_until_all_rpcs_go_to_given_backends(original_backend_instances,
_WAIT_FOR_STATS_SEC)
passed = True
try:
patch_url_map_backend_service(gcp, alternate_backend_service)
wait_until_all_rpcs_go_to_given_backends(alternate_backend_instances,
_WAIT_FOR_URL_MAP_PATCH_SEC)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
def test_gentle_failover(gcp,
backend_service,
primary_instance_group,
secondary_instance_group,
swapped_primary_and_secondary=False):
logger.info('Running test_gentle_failover')
num_primary_instances = len(get_instance_names(gcp, primary_instance_group))
min_instances_for_gentle_failover = 3 # Need >50% failure to start failover
passed = True
try:
if num_primary_instances < min_instances_for_gentle_failover:
resize_instance_group(gcp, primary_instance_group,
min_instances_for_gentle_failover)
patch_backend_service(
gcp, backend_service,
[primary_instance_group, secondary_instance_group])
primary_instance_names = get_instance_names(gcp, primary_instance_group)
secondary_instance_names = get_instance_names(gcp,
secondary_instance_group)
wait_for_healthy_backends(gcp, backend_service, primary_instance_group)
wait_for_healthy_backends(gcp, backend_service,
secondary_instance_group)
wait_until_all_rpcs_go_to_given_backends(primary_instance_names,
_WAIT_FOR_STATS_SEC)
instances_to_stop = primary_instance_names[:-1]
remaining_instances = primary_instance_names[-1:]
try:
set_serving_status(instances_to_stop,
gcp.service_port,
serving=False)
wait_until_all_rpcs_go_to_given_backends(
remaining_instances + secondary_instance_names,
_WAIT_FOR_BACKEND_SEC)
finally:
set_serving_status(primary_instance_names,
gcp.service_port,
serving=True)
except RpcDistributionError as e:
if not swapped_primary_and_secondary and is_primary_instance_group(
gcp, secondary_instance_group):
# Swap expectation of primary and secondary instance groups.
test_gentle_failover(gcp,
backend_service,
secondary_instance_group,
primary_instance_group,
swapped_primary_and_secondary=True)
else:
passed = False
raise e
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_backend_service(gcp, backend_service,
[primary_instance_group])
resize_instance_group(gcp, primary_instance_group,
num_primary_instances)
instance_names = get_instance_names(gcp, primary_instance_group)
wait_until_all_rpcs_go_to_given_backends(instance_names,
_WAIT_FOR_BACKEND_SEC)
def test_load_report_based_failover(gcp, backend_service,
primary_instance_group,
secondary_instance_group):
logger.info('Running test_load_report_based_failover')
passed = True
try:
patch_backend_service(
gcp, backend_service,
[primary_instance_group, secondary_instance_group])
primary_instance_names = get_instance_names(gcp, primary_instance_group)
secondary_instance_names = get_instance_names(gcp,
secondary_instance_group)
wait_for_healthy_backends(gcp, backend_service, primary_instance_group)
wait_for_healthy_backends(gcp, backend_service,
secondary_instance_group)
wait_until_all_rpcs_go_to_given_backends(primary_instance_names,
_WAIT_FOR_STATS_SEC)
# Set primary locality's balance mode to RATE, and RPS to 20% of the
# client's QPS. The secondary locality will be used.
max_rate = int(args.qps * 1 / 5)
logger.info('Patching backend service to RATE with %d max_rate',
max_rate)
patch_backend_service(
gcp,
backend_service, [primary_instance_group, secondary_instance_group],
balancing_mode='RATE',
max_rate=max_rate)
wait_until_all_rpcs_go_to_given_backends(
primary_instance_names + secondary_instance_names,
_WAIT_FOR_BACKEND_SEC)
# Set primary locality's balance mode to RATE, and RPS to 120% of the
# client's QPS. Only the primary locality will be used.
max_rate = int(args.qps * 6 / 5)
logger.info('Patching backend service to RATE with %d max_rate',
max_rate)
patch_backend_service(
gcp,
backend_service, [primary_instance_group, secondary_instance_group],
balancing_mode='RATE',
max_rate=max_rate)
wait_until_all_rpcs_go_to_given_backends(primary_instance_names,
_WAIT_FOR_BACKEND_SEC)
logger.info("success")
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_backend_service(gcp, backend_service,
[primary_instance_group])
instance_names = get_instance_names(gcp, primary_instance_group)
wait_until_all_rpcs_go_to_given_backends(instance_names,
_WAIT_FOR_BACKEND_SEC)
def test_ping_pong(gcp, backend_service, instance_group):
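    '''Verifies that RPCs reach every backend in the instance group.'''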
logger.info('Running test_ping_pong')
wait_for_healthy_backends(gcp, backend_service, instance_group)
instance_names = get_instance_names(gcp, instance_group)
wait_until_all_rpcs_go_to_given_backends(instance_names,
_WAIT_FOR_STATS_SEC)
def test_remove_instance_group(gcp, backend_service, instance_group,
same_zone_instance_group):
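    '''Verifies that removing one of two instance groups from the backend
    service shifts all traffic to the remaining instance group.
    '''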
logger.info('Running test_remove_instance_group')
passed = True
try:
patch_backend_service(gcp,
backend_service,
[instance_group, same_zone_instance_group],
balancing_mode='RATE')
wait_for_healthy_backends(gcp, backend_service, instance_group)
wait_for_healthy_backends(gcp, backend_service,
same_zone_instance_group)
instance_names = get_instance_names(gcp, instance_group)
same_zone_instance_names = get_instance_names(gcp,
same_zone_instance_group)
try:
wait_until_all_rpcs_go_to_given_backends(
instance_names + same_zone_instance_names,
_WAIT_FOR_OPERATION_SEC)
remaining_instance_group = same_zone_instance_group
remaining_instance_names = same_zone_instance_names
except RpcDistributionError as e:
            # If connected to TD in a different zone, we may route traffic to
            # only one instance group. Determine which group that is, in order
            # to continue with the remainder of the test case.
try:
wait_until_all_rpcs_go_to_given_backends(
instance_names, _WAIT_FOR_STATS_SEC)
remaining_instance_group = same_zone_instance_group
remaining_instance_names = same_zone_instance_names
except RpcDistributionError as e:
wait_until_all_rpcs_go_to_given_backends(
same_zone_instance_names, _WAIT_FOR_STATS_SEC)
remaining_instance_group = instance_group
remaining_instance_names = instance_names
patch_backend_service(gcp,
backend_service, [remaining_instance_group],
balancing_mode='RATE')
wait_until_all_rpcs_go_to_given_backends(remaining_instance_names,
_WAIT_FOR_BACKEND_SEC)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_backend_service(gcp, backend_service, [instance_group])
wait_until_all_rpcs_go_to_given_backends(instance_names,
_WAIT_FOR_BACKEND_SEC)
def test_round_robin(gcp, backend_service, instance_group):
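    '''Verifies that RPCs are spread evenly (round robin) across all backends
    in the instance group, within a small per-backend threshold.
    '''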
logger.info('Running test_round_robin')
wait_for_healthy_backends(gcp, backend_service, instance_group)
instance_names = get_instance_names(gcp, instance_group)
threshold = 1
wait_until_all_rpcs_go_to_given_backends(instance_names,
_WAIT_FOR_STATS_SEC)
# TODO(ericgribkoff) Delayed config propagation from earlier tests
# may result in briefly receiving an empty EDS update, resulting in failed
# RPCs. Retry distribution validation if this occurs; long-term fix is
# creating new backend resources for each individual test case.
# Each attempt takes 10 seconds. Config propagation can take several
# minutes.
max_attempts = 40
for i in range(max_attempts):
stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
requests_received = [stats.rpcs_by_peer[x] for x in stats.rpcs_by_peer]
total_requests_received = sum(requests_received)
if total_requests_received != _NUM_TEST_RPCS:
logger.info('Unexpected RPC failures, retrying: %s', stats)
continue
expected_requests = total_requests_received / len(instance_names)
for instance in instance_names:
if abs(stats.rpcs_by_peer[instance] -
expected_requests) > threshold:
raise Exception(
'RPC peer distribution differs from expected by more than %d '
'for instance %s (%s)' % (threshold, instance, stats))
return
raise Exception('RPC failures persisted through %d retries' % max_attempts)
def test_secondary_locality_gets_no_requests_on_partial_primary_failure(
gcp,
backend_service,
primary_instance_group,
secondary_instance_group,
swapped_primary_and_secondary=False):
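    '''Verifies that when a single primary backend is stopped, traffic stays on
    the remaining primary backends and the secondary locality gets no requests.
    '''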
logger.info(
'Running secondary_locality_gets_no_requests_on_partial_primary_failure'
)
passed = True
try:
patch_backend_service(
gcp, backend_service,
[primary_instance_group, secondary_instance_group])
wait_for_healthy_backends(gcp, backend_service, primary_instance_group)
wait_for_healthy_backends(gcp, backend_service,
secondary_instance_group)
primary_instance_names = get_instance_names(gcp, primary_instance_group)
wait_until_all_rpcs_go_to_given_backends(primary_instance_names,
_WAIT_FOR_STATS_SEC)
instances_to_stop = primary_instance_names[:1]
remaining_instances = primary_instance_names[1:]
try:
set_serving_status(instances_to_stop,
gcp.service_port,
serving=False)
wait_until_all_rpcs_go_to_given_backends(remaining_instances,
_WAIT_FOR_BACKEND_SEC)
finally:
set_serving_status(primary_instance_names,
gcp.service_port,
serving=True)
except RpcDistributionError as e:
if not swapped_primary_and_secondary and is_primary_instance_group(
gcp, secondary_instance_group):
# Swap expectation of primary and secondary instance groups.
test_secondary_locality_gets_no_requests_on_partial_primary_failure(
gcp,
backend_service,
secondary_instance_group,
primary_instance_group,
swapped_primary_and_secondary=True)
else:
passed = False
raise e
finally:
if passed or not args.halt_after_fail:
patch_backend_service(gcp, backend_service,
[primary_instance_group])
def test_secondary_locality_gets_requests_on_primary_failure(
gcp,
backend_service,
primary_instance_group,
secondary_instance_group,
swapped_primary_and_secondary=False):
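    '''Verifies that when all primary backends are stopped, traffic fails over
    to the secondary locality.
    '''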
logger.info('Running secondary_locality_gets_requests_on_primary_failure')
passed = True
try:
patch_backend_service(
gcp, backend_service,
[primary_instance_group, secondary_instance_group])
wait_for_healthy_backends(gcp, backend_service, primary_instance_group)
wait_for_healthy_backends(gcp, backend_service,
secondary_instance_group)
primary_instance_names = get_instance_names(gcp, primary_instance_group)
secondary_instance_names = get_instance_names(gcp,
secondary_instance_group)
wait_until_all_rpcs_go_to_given_backends(primary_instance_names,
_WAIT_FOR_STATS_SEC)
try:
set_serving_status(primary_instance_names,
gcp.service_port,
serving=False)
wait_until_all_rpcs_go_to_given_backends(secondary_instance_names,
_WAIT_FOR_BACKEND_SEC)
finally:
set_serving_status(primary_instance_names,
gcp.service_port,
serving=True)
except RpcDistributionError as e:
if not swapped_primary_and_secondary and is_primary_instance_group(
gcp, secondary_instance_group):
# Swap expectation of primary and secondary instance groups.
test_secondary_locality_gets_requests_on_primary_failure(
gcp,
backend_service,
secondary_instance_group,
primary_instance_group,
swapped_primary_and_secondary=True)
else:
passed = False
raise e
finally:
if passed or not args.halt_after_fail:
patch_backend_service(gcp, backend_service,
[primary_instance_group])
def prepare_services_for_urlmap_tests(gcp, original_backend_service,
instance_group, alternate_backend_service,
same_zone_instance_group):
    '''
    This function prepares the services to be ready for tests that modify
    url maps.

    Returns:
      Original and alternate backend instance names, as lists of strings.
    '''
logger.info('waiting for original backends to become healthy')
wait_for_healthy_backends(gcp, original_backend_service, instance_group)
patch_backend_service(gcp, alternate_backend_service,
[same_zone_instance_group])
logger.info('waiting for alternate to become healthy')
wait_for_healthy_backends(gcp, alternate_backend_service,
same_zone_instance_group)
original_backend_instances = get_instance_names(gcp, instance_group)
logger.info('original backends instances: %s', original_backend_instances)
alternate_backend_instances = get_instance_names(gcp,
same_zone_instance_group)
logger.info('alternate backends instances: %s', alternate_backend_instances)
# Start with all traffic going to original_backend_service.
logger.info('waiting for traffic to all go to original backends')
wait_until_all_rpcs_go_to_given_backends(original_backend_instances,
_WAIT_FOR_STATS_SEC)
return original_backend_instances, alternate_backend_instances
def test_metadata_filter(gcp, original_backend_service, instance_group,
alternate_backend_service, same_zone_instance_group):
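    '''Exercises url map route rules with metadataFilters (MATCH_ALL and
    MATCH_ANY) built from the node metadata in the client bootstrap file, and
    verifies that traffic shifts to the alternate backend service only when
    the filters match.
    '''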
logger.info("Running test_metadata_filter")
wait_for_healthy_backends(gcp, original_backend_service, instance_group)
original_backend_instances = get_instance_names(gcp, instance_group)
alternate_backend_instances = get_instance_names(gcp,
same_zone_instance_group)
patch_backend_service(gcp, alternate_backend_service,
[same_zone_instance_group])
wait_for_healthy_backends(gcp, alternate_backend_service,
same_zone_instance_group)
passed = True
try:
with open(bootstrap_path) as f:
md = json.load(f)['node']['metadata']
match_labels = []
for k, v in list(md.items()):
match_labels.append({'name': k, 'value': v})
not_match_labels = [{'name': 'fake', 'value': 'fail'}]
test_route_rules = [
# test MATCH_ALL
[
{
'priority': 0,
'matchRules': [{
'prefixMatch':
'/',
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ALL',
'filterLabels': not_match_labels
}]
}],
'service': original_backend_service.url
},
{
'priority': 1,
'matchRules': [{
'prefixMatch':
'/',
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ALL',
'filterLabels': match_labels
}]
}],
'service': alternate_backend_service.url
},
],
# test mixing MATCH_ALL and MATCH_ANY
# test MATCH_ALL: super set labels won't match
[
{
'priority': 0,
'matchRules': [{
'prefixMatch':
'/',
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ALL',
'filterLabels': not_match_labels + match_labels
}]
}],
'service': original_backend_service.url
},
{
'priority': 1,
'matchRules': [{
'prefixMatch':
'/',
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ANY',
'filterLabels': not_match_labels + match_labels
}]
}],
'service': alternate_backend_service.url
},
],
# test MATCH_ANY
[
{
'priority': 0,
'matchRules': [{
'prefixMatch':
'/',
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ANY',
'filterLabels': not_match_labels
}]
}],
'service': original_backend_service.url
},
{
'priority': 1,
'matchRules': [{
'prefixMatch':
'/',
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ANY',
'filterLabels': not_match_labels + match_labels
}]
}],
'service': alternate_backend_service.url
},
],
# test match multiple route rules
[
{
'priority': 0,
'matchRules': [{
'prefixMatch':
'/',
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ANY',
'filterLabels': match_labels
}]
}],
'service': alternate_backend_service.url
},
{
'priority': 1,
'matchRules': [{
'prefixMatch':
'/',
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ALL',
'filterLabels': match_labels
}]
}],
'service': original_backend_service.url
},
]
]
for route_rules in test_route_rules:
wait_until_all_rpcs_go_to_given_backends(original_backend_instances,
_WAIT_FOR_STATS_SEC)
patch_url_map_backend_service(gcp,
original_backend_service,
route_rules=route_rules)
wait_until_no_rpcs_go_to_given_backends(original_backend_instances,
_WAIT_FOR_STATS_SEC)
wait_until_all_rpcs_go_to_given_backends(
alternate_backend_instances, _WAIT_FOR_STATS_SEC)
patch_url_map_backend_service(gcp, original_backend_service)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_backend_service(gcp, alternate_backend_service, [])
def test_api_listener(gcp, backend_service, instance_group,
alternate_backend_service):
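    '''Creates a second url map / target proxy / forwarding rule for the same
    host and verifies the client keeps working while the duplicate API listener
    exists and after the original resources are deleted.

    Returns the server URI the client should target after the test.
    '''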
logger.info("Running api_listener")
passed = True
new_config_suffix = ''
try:
wait_for_healthy_backends(gcp, backend_service, instance_group)
backend_instances = get_instance_names(gcp, instance_group)
wait_until_all_rpcs_go_to_given_backends(backend_instances,
_WAIT_FOR_STATS_SEC)
        # Create a second set of url map + target proxy + forwarding rule with
        # the same host name in the host rule. Proxyless validation must be
        # disabled because it requires a `0.0.0.0` IP address in the forwarding
        # rule, and this test intentionally violates ip:port uniqueness.
        # See https://github.com/grpc/grpc-java/issues/8009
new_config_suffix = '2'
create_url_map(gcp, url_map_name + new_config_suffix, backend_service,
service_host_name)
create_target_proxy(gcp, target_proxy_name + new_config_suffix, False)
if not gcp.service_port:
raise Exception(
                'Failed to find a valid port for the forwarding rule')
potential_ip_addresses = []
max_attempts = 10
for i in range(max_attempts):
potential_ip_addresses.append('10.10.10.%d' %
(random.randint(0, 255)))
create_global_forwarding_rule(gcp,
forwarding_rule_name + new_config_suffix,
[gcp.service_port],
potential_ip_addresses)
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(gcp,
url_map_name + new_config_suffix,
backend_service,
service_host_name)
wait_until_all_rpcs_go_to_given_backends(backend_instances,
_WAIT_FOR_STATS_SEC)
delete_global_forwarding_rule(gcp, forwarding_rule_name)
delete_target_proxy(gcp, target_proxy_name)
delete_url_map(gcp, url_map_name)
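        # Keep verifying traffic for roughly _WAIT_FOR_URL_MAP_PATCH_SEC: each
        # stats query below sends _NUM_TEST_RPCS RPCs at args.qps, i.e. takes
        # about _NUM_TEST_RPCS / args.qps seconds per attempt.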
verify_attempts = int(_WAIT_FOR_URL_MAP_PATCH_SEC / _NUM_TEST_RPCS *
args.qps)
for i in range(verify_attempts):
wait_until_all_rpcs_go_to_given_backends(backend_instances,
_WAIT_FOR_STATS_SEC)
# delete host rule for the original host name
patch_url_map_backend_service(gcp, alternate_backend_service)
wait_until_no_rpcs_go_to_given_backends(backend_instances,
_WAIT_FOR_STATS_SEC)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
delete_global_forwarding_rule(
gcp, forwarding_rule_name + new_config_suffix)
delete_target_proxy(gcp, target_proxy_name + new_config_suffix)
delete_url_map(gcp, url_map_name + new_config_suffix)
create_url_map(gcp, url_map_name, backend_service,
service_host_name)
create_target_proxy(gcp, target_proxy_name)
create_global_forwarding_rule(gcp, forwarding_rule_name,
potential_service_ports)
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(gcp, url_map_name,
backend_service,
service_host_name)
server_uri = service_host_name + ':' + str(gcp.service_port)
else:
server_uri = service_host_name
return server_uri
def test_forwarding_rule_port_match(gcp, backend_service, instance_group):
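    '''Verifies that traffic stops when the forwarding rule is recreated on
    ports that do not match the port the client is targeting.

    Returns the server URI the client should target after the test.
    '''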
logger.info("Running test_forwarding_rule_port_match")
passed = True
try:
wait_for_healthy_backends(gcp, backend_service, instance_group)
backend_instances = get_instance_names(gcp, instance_group)
wait_until_all_rpcs_go_to_given_backends(backend_instances,
_WAIT_FOR_STATS_SEC)
delete_global_forwarding_rule(gcp)
create_global_forwarding_rule(gcp, forwarding_rule_name, [
x for x in parse_port_range(_DEFAULT_PORT_RANGE)
if x != gcp.service_port
])
wait_until_no_rpcs_go_to_given_backends(backend_instances,
_WAIT_FOR_STATS_SEC)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
delete_global_forwarding_rule(gcp)
create_global_forwarding_rule(gcp, forwarding_rule_name,
potential_service_ports)
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(gcp, url_map_name,
backend_service,
service_host_name)
server_uri = service_host_name + ':' + str(gcp.service_port)
else:
server_uri = service_host_name
return server_uri
def test_forwarding_rule_default_port(gcp, backend_service, instance_group):
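    '''Verifies forwarding-rule behavior around the default port (80): traffic
    stops when the url map expects an explicit port the client does not send,
    and resumes when neither the client target URI nor the url map specifies a
    port.

    Returns the server URI the client should target after the test.
    '''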
logger.info("Running test_forwarding_rule_default_port")
passed = True
try:
wait_for_healthy_backends(gcp, backend_service, instance_group)
backend_instances = get_instance_names(gcp, instance_group)
if gcp.service_port == _DEFAULT_SERVICE_PORT:
wait_until_all_rpcs_go_to_given_backends(backend_instances,
_WAIT_FOR_STATS_SEC)
delete_global_forwarding_rule(gcp)
create_global_forwarding_rule(gcp, forwarding_rule_name,
parse_port_range(_DEFAULT_PORT_RANGE))
patch_url_map_host_rule_with_port(gcp, url_map_name,
backend_service,
service_host_name)
wait_until_no_rpcs_go_to_given_backends(backend_instances,
_WAIT_FOR_STATS_SEC)
# expect success when no port in client request service uri, and no port in url-map
delete_global_forwarding_rule(gcp)
delete_target_proxy(gcp)
delete_url_map(gcp)
create_url_map(gcp, url_map_name, backend_service, service_host_name)
create_target_proxy(gcp, gcp.target_proxy.name, False)
potential_ip_addresses = []
max_attempts = 10
for i in range(max_attempts):
potential_ip_addresses.append('10.10.10.%d' %
(random.randint(0, 255)))
create_global_forwarding_rule(gcp, forwarding_rule_name, [80],
potential_ip_addresses)
wait_until_all_rpcs_go_to_given_backends(backend_instances,
_WAIT_FOR_STATS_SEC)
# expect failure when no port in client request uri, but specify port in url-map
patch_url_map_host_rule_with_port(gcp, url_map_name, backend_service,
service_host_name)
wait_until_no_rpcs_go_to_given_backends(backend_instances,
_WAIT_FOR_STATS_SEC)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
delete_global_forwarding_rule(gcp)
delete_target_proxy(gcp)
delete_url_map(gcp)
create_url_map(gcp, url_map_name, backend_service,
service_host_name)
create_target_proxy(gcp, target_proxy_name)
create_global_forwarding_rule(gcp, forwarding_rule_name,
potential_service_ports)
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(gcp, url_map_name,
backend_service,
service_host_name)
server_uri = service_host_name + ':' + str(gcp.service_port)
else:
server_uri = service_host_name
return server_uri
def test_traffic_splitting(gcp, original_backend_service, instance_group,
alternate_backend_service, same_zone_instance_group):
    # This test starts with all traffic going to original_backend_service. Then
    # it updates the url map to set the default action to traffic splitting
    # between original and alternate. It waits for all backends in both services
    # to receive traffic, then verifies that the weights match expectations.
logger.info('Running test_traffic_splitting')
original_backend_instances, alternate_backend_instances = prepare_services_for_urlmap_tests(
gcp, original_backend_service, instance_group,
alternate_backend_service, same_zone_instance_group)
passed = True
try:
# Patch urlmap, change route action to traffic splitting between
# original and alternate.
logger.info('patching url map with traffic splitting')
original_service_percentage, alternate_service_percentage = 20, 80
patch_url_map_backend_service(
gcp,
services_with_weights={
original_backend_service: original_service_percentage,
alternate_backend_service: alternate_service_percentage,
})
# Split percentage between instances: [20,80] -> [10,10,40,40].
expected_instance_percentage = [
original_service_percentage * 1.0 / len(original_backend_instances)
] * len(original_backend_instances) + [
alternate_service_percentage * 1.0 /
len(alternate_backend_instances)
] * len(alternate_backend_instances)
# Wait for traffic to go to both services.
logger.info(
'waiting for traffic to go to all backends (including alternate)')
wait_until_all_rpcs_go_to_given_backends(
original_backend_instances + alternate_backend_instances,
_WAIT_FOR_STATS_SEC)
# Verify that weights between two services are expected.
retry_count = 10
# Each attempt takes about 10 seconds, 10 retries is equivalent to 100
# seconds timeout.
for i in range(retry_count):
stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
got_instance_count = [
stats.rpcs_by_peer[i] for i in original_backend_instances
] + [stats.rpcs_by_peer[i] for i in alternate_backend_instances]
total_count = sum(got_instance_count)
got_instance_percentage = [
x * 100.0 / total_count for x in got_instance_count
]
try:
compare_distributions(got_instance_percentage,
expected_instance_percentage, 5)
except Exception as e:
logger.info('attempt %d', i)
logger.info('got percentage: %s', got_instance_percentage)
logger.info('expected percentage: %s',
expected_instance_percentage)
logger.info(e)
if i == retry_count - 1:
raise Exception(
'RPC distribution (%s) differs from expected (%s)' %
(got_instance_percentage, expected_instance_percentage))
else:
logger.info("success")
break
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
def test_path_matching(gcp, original_backend_service, instance_group,
alternate_backend_service, same_zone_instance_group):
    # This test starts with all traffic (UnaryCall and EmptyCall) going to
    # original_backend_service.
    #
    # Then it updates the url map to add routes that send UnaryCall and
    # EmptyCall to different backends. It waits for all backends in both
    # services to receive traffic, then verifies that traffic goes to the
    # expected backends.
logger.info('Running test_path_matching')
original_backend_instances, alternate_backend_instances = prepare_services_for_urlmap_tests(
gcp, original_backend_service, instance_group,
alternate_backend_service, same_zone_instance_group)
passed = True
try:
# A list of tuples (route_rules, expected_instances).
test_cases = [
(
[{
'priority': 0,
# FullPath EmptyCall -> alternate_backend_service.
'matchRules': [{
'fullPathMatch': '/grpc.testing.TestService/EmptyCall'
}],
'service': alternate_backend_service.url
}],
{
"EmptyCall": alternate_backend_instances,
"UnaryCall": original_backend_instances
}),
(
[{
'priority': 0,
# Prefix UnaryCall -> alternate_backend_service.
'matchRules': [{
'prefixMatch': '/grpc.testing.TestService/Unary'
}],
'service': alternate_backend_service.url
}],
{
"UnaryCall": alternate_backend_instances,
"EmptyCall": original_backend_instances
}),
(
# This test case is similar to the one above (but with route
# services swapped). This test has two routes (full_path and
# the default) to match EmptyCall, and both routes set
# alternative_backend_service as the action. This forces the
# client to handle duplicate Clusters in the RDS response.
[
{
'priority': 0,
# Prefix UnaryCall -> original_backend_service.
'matchRules': [{
'prefixMatch': '/grpc.testing.TestService/Unary'
}],
'service': original_backend_service.url
},
{
'priority': 1,
# FullPath EmptyCall -> alternate_backend_service.
'matchRules': [{
'fullPathMatch':
'/grpc.testing.TestService/EmptyCall'
}],
'service': alternate_backend_service.url
}
],
{
"UnaryCall": original_backend_instances,
"EmptyCall": alternate_backend_instances
}),
(
[{
'priority': 0,
# Regex UnaryCall -> alternate_backend_service.
'matchRules': [{
                        'regexMatch':
                            r'^\/.*\/UnaryCall$'  # Unary methods with any services.
}],
'service': alternate_backend_service.url
}],
{
"UnaryCall": alternate_backend_instances,
"EmptyCall": original_backend_instances
}),
(
[{
'priority': 0,
# ignoreCase EmptyCall -> alternate_backend_service.
'matchRules': [{
# Case insensitive matching.
'fullPathMatch': '/gRpC.tEsTinG.tEstseRvice/empTycaLl',
'ignoreCase': True,
}],
'service': alternate_backend_service.url
}],
{
"UnaryCall": original_backend_instances,
"EmptyCall": alternate_backend_instances
}),
]
for (route_rules, expected_instances) in test_cases:
logger.info('patching url map with %s', route_rules)
patch_url_map_backend_service(gcp,
original_backend_service,
route_rules=route_rules)
# Wait for traffic to go to both services.
logger.info(
'waiting for traffic to go to all backends (including alternate)'
)
wait_until_all_rpcs_go_to_given_backends(
original_backend_instances + alternate_backend_instances,
_WAIT_FOR_STATS_SEC)
retry_count = 80
# Each attempt takes about 5 seconds, 80 retries is equivalent to 400
# seconds timeout.
for i in range(retry_count):
stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
if not stats.rpcs_by_method:
raise ValueError(
'stats.rpcs_by_method is None, the interop client stats service does not support this test case'
)
logger.info('attempt %d', i)
if compare_expected_instances(stats, expected_instances):
logger.info("success")
break
elif i == retry_count - 1:
raise Exception(
'timeout waiting for RPCs to the expected instances: %s'
% expected_instances)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
def test_header_matching(gcp, original_backend_service, instance_group,
alternate_backend_service, same_zone_instance_group):
    # This test starts with all traffic (UnaryCall and EmptyCall) going to
    # original_backend_service.
    #
    # Then it updates the url map to add routes that send RPCs carrying the
    # test headers to different backends. It waits for all backends in both
    # services to receive traffic, then verifies that traffic goes to the
    # expected backends.
logger.info('Running test_header_matching')
original_backend_instances, alternate_backend_instances = prepare_services_for_urlmap_tests(
gcp, original_backend_service, instance_group,
alternate_backend_service, same_zone_instance_group)
passed = True
try:
# A list of tuples (route_rules, expected_instances).
test_cases = [
(
[{
'priority': 0,
# Header ExactMatch -> alternate_backend_service.
# EmptyCall is sent with the metadata.
'matchRules': [{
'prefixMatch':
'/',
'headerMatches': [{
'headerName': _TEST_METADATA_KEY,
'exactMatch': _TEST_METADATA_VALUE_EMPTY
}]
}],
'service': alternate_backend_service.url
}],
{
"EmptyCall": alternate_backend_instances,
"UnaryCall": original_backend_instances
}),
(
[{
'priority': 0,
# Header PrefixMatch -> alternate_backend_service.
# UnaryCall is sent with the metadata.
'matchRules': [{
'prefixMatch':
'/',
'headerMatches': [{
'headerName': _TEST_METADATA_KEY,
'prefixMatch': _TEST_METADATA_VALUE_UNARY[:2]
}]
}],
'service': alternate_backend_service.url
}],
{
"EmptyCall": original_backend_instances,
"UnaryCall": alternate_backend_instances
}),
(
[{
'priority': 0,
# Header SuffixMatch -> alternate_backend_service.
# EmptyCall is sent with the metadata.
'matchRules': [{
'prefixMatch':
'/',
'headerMatches': [{
'headerName': _TEST_METADATA_KEY,
'suffixMatch': _TEST_METADATA_VALUE_EMPTY[-2:]
}]
}],
'service': alternate_backend_service.url
}],
{
"EmptyCall": alternate_backend_instances,
"UnaryCall": original_backend_instances
}),
(
[{
'priority': 0,
# Header 'xds_md_numeric' present -> alternate_backend_service.
# UnaryCall is sent with the metadata, so will be sent to alternative.
'matchRules': [{
'prefixMatch':
'/',
'headerMatches': [{
'headerName': _TEST_METADATA_NUMERIC_KEY,
'presentMatch': True
}]
}],
'service': alternate_backend_service.url
}],
{
"EmptyCall": original_backend_instances,
"UnaryCall": alternate_backend_instances
}),
(
[{
'priority': 0,
# Header invert ExactMatch -> alternate_backend_service.
# UnaryCall is sent with the metadata, so will be sent to
# original. EmptyCall will be sent to alternative.
'matchRules': [{
'prefixMatch':
'/',
'headerMatches': [{
'headerName': _TEST_METADATA_KEY,
'exactMatch': _TEST_METADATA_VALUE_UNARY,
'invertMatch': True
}]
}],
'service': alternate_backend_service.url
}],
{
"EmptyCall": alternate_backend_instances,
"UnaryCall": original_backend_instances
}),
(
[{
'priority': 0,
# Header 'xds_md_numeric' range [100,200] -> alternate_backend_service.
# UnaryCall is sent with the metadata in range.
'matchRules': [{
'prefixMatch':
'/',
'headerMatches': [{
'headerName': _TEST_METADATA_NUMERIC_KEY,
'rangeMatch': {
'rangeStart': '100',
'rangeEnd': '200'
}
}]
}],
'service': alternate_backend_service.url
}],
{
"EmptyCall": original_backend_instances,
"UnaryCall": alternate_backend_instances
}),
(
[{
'priority': 0,
# Header RegexMatch -> alternate_backend_service.
# EmptyCall is sent with the metadata.
'matchRules': [{
'prefixMatch':
'/',
'headerMatches': [{
'headerName':
_TEST_METADATA_KEY,
'regexMatch':
"^%s.*%s$" % (_TEST_METADATA_VALUE_EMPTY[:2],
_TEST_METADATA_VALUE_EMPTY[-2:])
}]
}],
'service': alternate_backend_service.url
}],
{
"EmptyCall": alternate_backend_instances,
"UnaryCall": original_backend_instances
}),
]
for (route_rules, expected_instances) in test_cases:
logger.info('patching url map with %s -> alternative',
route_rules[0]['matchRules'])
patch_url_map_backend_service(gcp,
original_backend_service,
route_rules=route_rules)
# Wait for traffic to go to both services.
logger.info(
'waiting for traffic to go to all backends (including alternate)'
)
wait_until_all_rpcs_go_to_given_backends(
original_backend_instances + alternate_backend_instances,
_WAIT_FOR_STATS_SEC)
retry_count = 80
# Each attempt takes about 5 seconds, 80 retries is equivalent to 400
# seconds timeout.
for i in range(retry_count):
stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
if not stats.rpcs_by_method:
raise ValueError(
'stats.rpcs_by_method is None, the interop client stats service does not support this test case'
)
logger.info('attempt %d', i)
if compare_expected_instances(stats, expected_instances):
logger.info("success")
break
elif i == retry_count - 1:
raise Exception(
'timeout waiting for RPCs to the expected instances: %s'
% expected_instances)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
def test_circuit_breaking(gcp, original_backend_service, instance_group,
same_zone_instance_group):
    '''
    Because the backend service circuit_breakers configuration cannot be unset,
    which makes it hard to restore the validate_for_proxyless flag on the
    target proxy/global forwarding rule, this test uses dedicated backend
    services.

    The url_map and backend services undergo the following state changes:
Before test:
original_backend_service -> [instance_group]
extra_backend_service -> []
more_extra_backend_service -> []
url_map -> [original_backend_service]
In test:
extra_backend_service (with circuit_breakers) -> [instance_group]
more_extra_backend_service (with circuit_breakers) -> [same_zone_instance_group]
url_map -> [extra_backend_service, more_extra_backend_service]
After test:
original_backend_service -> [instance_group]
extra_backend_service (with circuit_breakers) -> []
more_extra_backend_service (with circuit_breakers) -> []
url_map -> [original_backend_service]
'''
logger.info('Running test_circuit_breaking')
additional_backend_services = []
passed = True
try:
# TODO(chengyuanzhang): Dedicated backend services created for circuit
# breaking test. Once the issue for unsetting backend service circuit
# breakers is resolved or configuring backend service circuit breakers is
# enabled for config validation, these dedicated backend services can be
# eliminated.
extra_backend_service_name = _BASE_BACKEND_SERVICE_NAME + '-extra' + gcp_suffix
more_extra_backend_service_name = _BASE_BACKEND_SERVICE_NAME + '-more-extra' + gcp_suffix
extra_backend_service = add_backend_service(gcp,
extra_backend_service_name)
additional_backend_services.append(extra_backend_service)
more_extra_backend_service = add_backend_service(
gcp, more_extra_backend_service_name)
additional_backend_services.append(more_extra_backend_service)
        # The config validation for proxyless doesn't allow setting
        # circuit_breakers. Disable validate_for_proxyless for this test. This
        # can be removed when validation accepts circuit_breakers.
logger.info('disabling validate_for_proxyless in target proxy')
set_validate_for_proxyless(gcp, False)
extra_backend_service_max_requests = 500
more_extra_backend_service_max_requests = 1000
patch_backend_service(gcp,
extra_backend_service, [instance_group],
circuit_breakers={
'maxRequests':
extra_backend_service_max_requests
})
logger.info('Waiting for extra backends to become healthy')
wait_for_healthy_backends(gcp, extra_backend_service, instance_group)
patch_backend_service(gcp,
more_extra_backend_service,
[same_zone_instance_group],
circuit_breakers={
'maxRequests':
more_extra_backend_service_max_requests
})
logger.info('Waiting for more extra backend to become healthy')
wait_for_healthy_backends(gcp, more_extra_backend_service,
same_zone_instance_group)
extra_backend_instances = get_instance_names(gcp, instance_group)
more_extra_backend_instances = get_instance_names(
gcp, same_zone_instance_group)
route_rules = [
{
'priority': 0,
# UnaryCall -> extra_backend_service
'matchRules': [{
'fullPathMatch': '/grpc.testing.TestService/UnaryCall'
}],
'service': extra_backend_service.url
},
{
'priority': 1,
# EmptyCall -> more_extra_backend_service
'matchRules': [{
'fullPathMatch': '/grpc.testing.TestService/EmptyCall'
}],
'service': more_extra_backend_service.url
},
]
# Make client send UNARY_CALL and EMPTY_CALL.
configure_client([
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
messages_pb2.ClientConfigureRequest.RpcType.EMPTY_CALL
])
logger.info('Patching url map with %s', route_rules)
patch_url_map_backend_service(gcp,
extra_backend_service,
route_rules=route_rules)
logger.info('Waiting for traffic to go to all backends')
wait_until_all_rpcs_go_to_given_backends(
extra_backend_instances + more_extra_backend_instances,
_WAIT_FOR_STATS_SEC)
# Make all calls keep-open.
configure_client([
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
messages_pb2.ClientConfigureRequest.RpcType.EMPTY_CALL
], [(messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
'rpc-behavior', 'keep-open'),
(messages_pb2.ClientConfigureRequest.RpcType.EMPTY_CALL,
'rpc-behavior', 'keep-open')])
wait_until_rpcs_in_flight(
'UNARY_CALL', (_WAIT_FOR_BACKEND_SEC +
int(extra_backend_service_max_requests / args.qps)),
extra_backend_service_max_requests, 1)
logger.info('UNARY_CALL reached stable state (%d)',
extra_backend_service_max_requests)
wait_until_rpcs_in_flight(
'EMPTY_CALL',
(_WAIT_FOR_BACKEND_SEC +
int(more_extra_backend_service_max_requests / args.qps)),
more_extra_backend_service_max_requests, 1)
logger.info('EMPTY_CALL reached stable state (%d)',
more_extra_backend_service_max_requests)
# Increment circuit breakers max_requests threshold.
extra_backend_service_max_requests = 800
patch_backend_service(gcp,
extra_backend_service, [instance_group],
circuit_breakers={
'maxRequests':
extra_backend_service_max_requests
})
wait_until_rpcs_in_flight(
'UNARY_CALL', (_WAIT_FOR_BACKEND_SEC +
int(extra_backend_service_max_requests / args.qps)),
extra_backend_service_max_requests, 1)
logger.info('UNARY_CALL reached stable state after increase (%d)',
extra_backend_service_max_requests)
logger.info('success')
# Avoid new RPCs being outstanding (some test clients create threads
# for sending RPCs) after restoring backend services.
configure_client(
[messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL])
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, original_backend_service,
[instance_group])
for backend_service in additional_backend_services:
delete_backend_service(gcp, backend_service)
set_validate_for_proxyless(gcp, True)
def test_timeout(gcp, original_backend_service, instance_group):
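    '''Verifies RPC deadline behavior: with a 3s maxStreamDuration on UnaryCall
    in the url map, server-side sleeps and client-side timeouts should produce
    DEADLINE_EXCEEDED or OK according to each test case's expectations.
    '''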
logger.info('Running test_timeout')
logger.info('waiting for original backends to become healthy')
wait_for_healthy_backends(gcp, original_backend_service, instance_group)
# UnaryCall -> maxStreamDuration:3s
route_rules = [{
'priority': 0,
'matchRules': [{
'fullPathMatch': '/grpc.testing.TestService/UnaryCall'
}],
'service': original_backend_service.url,
'routeAction': {
'maxStreamDuration': {
'seconds': 3,
},
},
}]
patch_url_map_backend_service(gcp,
original_backend_service,
route_rules=route_rules)
# A list of tuples (testcase_name, {client_config}, {expected_results})
test_cases = [
(
'timeout_exceeded (UNARY_CALL), timeout_different_route (EMPTY_CALL)',
# UnaryCall and EmptyCall both sleep-4.
# UnaryCall timeouts, EmptyCall succeeds.
{
'rpc_types': [
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
messages_pb2.ClientConfigureRequest.RpcType.EMPTY_CALL,
],
'metadata': [
(messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
'rpc-behavior', 'sleep-4'),
(messages_pb2.ClientConfigureRequest.RpcType.EMPTY_CALL,
'rpc-behavior', 'sleep-4'),
],
},
{
'UNARY_CALL': 4, # DEADLINE_EXCEEDED
'EMPTY_CALL': 0,
},
),
(
'app_timeout_exceeded',
# UnaryCall only with sleep-2; timeout=1s; calls timeout.
{
'rpc_types': [
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
],
'metadata': [
(messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
'rpc-behavior', 'sleep-2'),
],
'timeout_sec': 1,
},
{
'UNARY_CALL': 4, # DEADLINE_EXCEEDED
},
),
(
'timeout_not_exceeded',
# UnaryCall only with no sleep; calls succeed.
{
'rpc_types': [
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
],
},
{
'UNARY_CALL': 0,
},
)
]
passed = True
try:
first_case = True
for (testcase_name, client_config, expected_results) in test_cases:
logger.info('starting case %s', testcase_name)
configure_client(**client_config)
# wait a second to help ensure the client stops sending RPCs with
# the old config. We will make multiple attempts if it is failing,
# but this improves confidence that the test is valid if the
# previous client_config would lead to the same results.
time.sleep(1)
# Each attempt takes 10 seconds; 20 attempts is equivalent to 200
# second timeout.
attempt_count = 20
if first_case:
attempt_count = 120
first_case = False
before_stats = get_client_accumulated_stats()
if not before_stats.stats_per_method:
raise ValueError(
'stats.stats_per_method is None, the interop client stats service does not support this test case'
)
for i in range(attempt_count):
logger.info('%s: attempt %d', testcase_name, i)
test_runtime_secs = 10
time.sleep(test_runtime_secs)
after_stats = get_client_accumulated_stats()
success = True
for rpc, status in list(expected_results.items()):
qty = (after_stats.stats_per_method[rpc].result[status] -
before_stats.stats_per_method[rpc].result[status])
want = test_runtime_secs * args.qps
# Allow 10% deviation from expectation to reduce flakiness
if qty < (want * .9) or qty > (want * 1.1):
logger.info('%s: failed due to %s[%s]: got %d want ~%d',
testcase_name, rpc, status, qty, want)
success = False
if success:
logger.info('success')
break
logger.info('%s attempt %d failed', testcase_name, i)
before_stats = after_stats
else:
raise Exception(
'%s: timeout waiting for expected results: %s; got %s' %
(testcase_name, expected_results,
after_stats.stats_per_method))
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
def test_fault_injection(gcp, original_backend_service, instance_group):
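    '''Verifies url map faultInjectionPolicy behavior: routes keyed on a test
    header inject aborts and/or delays at various percentages, and the observed
    RPC status distribution must match each test case's expectations.
    '''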
logger.info('Running test_fault_injection')
logger.info('waiting for original backends to become healthy')
wait_for_healthy_backends(gcp, original_backend_service, instance_group)
testcase_header = 'fi_testcase'
def _route(pri, name, fi_policy):
return {
'priority': pri,
'matchRules': [{
'prefixMatch':
'/',
'headerMatches': [{
'headerName': testcase_header,
'exactMatch': name,
}],
}],
'service': original_backend_service.url,
'routeAction': {
'faultInjectionPolicy': fi_policy
},
}
def _abort(pct):
return {
'abort': {
'httpStatus': 401,
'percentage': pct,
}
}
def _delay(pct):
return {
'delay': {
'fixedDelay': {
'seconds': '20'
},
'percentage': pct,
}
}
zero_route = _abort(0)
zero_route.update(_delay(0))
route_rules = [
_route(0, 'zero_percent_fault_injection', zero_route),
_route(1, 'always_delay', _delay(100)),
_route(2, 'always_abort', _abort(100)),
_route(3, 'delay_half', _delay(50)),
_route(4, 'abort_half', _abort(50)),
{
'priority': 5,
'matchRules': [{
'prefixMatch': '/'
}],
'service': original_backend_service.url,
},
]
set_validate_for_proxyless(gcp, False)
patch_url_map_backend_service(gcp,
original_backend_service,
route_rules=route_rules)
# A list of tuples (testcase_name, {client_config}, {code: percent}). Each
# test case will set the testcase_header with the testcase_name for routing
# to the appropriate config for the case, defined above.
test_cases = [
(
'zero_percent_fault_injection',
{},
{
0: 1
}, # OK
),
(
'non_matching_fault_injection', # Not in route_rules, above.
{},
{
0: 1
}, # OK
),
(
'always_delay',
{
'timeout_sec': 2
},
{
4: 1
}, # DEADLINE_EXCEEDED
),
(
'always_abort',
{},
{
16: 1
}, # UNAUTHENTICATED
),
(
'delay_half',
{
'timeout_sec': 2
},
{
4: .5,
0: .5
}, # DEADLINE_EXCEEDED / OK: 50% / 50%
),
(
'abort_half',
{},
{
16: .5,
0: .5
}, # UNAUTHENTICATED / OK: 50% / 50%
)
]
passed = True
try:
first_case = True
for (testcase_name, client_config, expected_results) in test_cases:
logger.info('starting case %s', testcase_name)
client_config['metadata'] = [
(messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
testcase_header, testcase_name)
]
client_config['rpc_types'] = [
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
]
configure_client(**client_config)
# wait a second to help ensure the client stops sending RPCs with
# the old config. We will make multiple attempts if it is failing,
# but this improves confidence that the test is valid if the
# previous client_config would lead to the same results.
time.sleep(1)
# Each attempt takes 10 seconds; 20 attempts is equivalent to 200
# second timeout.
attempt_count = 20
if first_case:
attempt_count = 120
first_case = False
before_stats = get_client_accumulated_stats()
if not before_stats.stats_per_method:
raise ValueError(
'stats.stats_per_method is None, the interop client stats service does not support this test case'
)
for i in range(attempt_count):
logger.info('%s: attempt %d', testcase_name, i)
test_runtime_secs = 10
time.sleep(test_runtime_secs)
after_stats = get_client_accumulated_stats()
success = True
for status, pct in list(expected_results.items()):
rpc = 'UNARY_CALL'
qty = (after_stats.stats_per_method[rpc].result[status] -
before_stats.stats_per_method[rpc].result[status])
want = pct * args.qps * test_runtime_secs
# Allow 10% deviation from expectation to reduce flakiness
VARIANCE_ALLOWED = 0.1
if abs(qty - want) > want * VARIANCE_ALLOWED:
logger.info('%s: failed due to %s[%s]: got %d want ~%d',
testcase_name, rpc, status, qty, want)
success = False
if success:
logger.info('success')
break
logger.info('%s attempt %d failed', testcase_name, i)
before_stats = after_stats
else:
raise Exception(
'%s: timeout waiting for expected results: %s; got %s' %
(testcase_name, expected_results,
after_stats.stats_per_method))
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
set_validate_for_proxyless(gcp, True)
def test_csds(gcp, original_backend_service, instance_group, server_uri):
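    '''Verifies the client's CSDS (xDS config dump): within the test timeout,
    the dump must contain LDS, RDS, CDS, and EDS entries consistent with the
    test's zone and server URI.
    '''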
test_csds_timeout_s = datetime.timedelta(minutes=5).total_seconds()
sleep_interval_between_attempts_s = datetime.timedelta(
seconds=2).total_seconds()
logger.info('Running test_csds')
logger.info('waiting for original backends to become healthy')
wait_for_healthy_backends(gcp, original_backend_service, instance_group)
# Test case timeout: 5 minutes
deadline = time.time() + test_csds_timeout_s
cnt = 0
while time.time() <= deadline:
client_config = get_client_xds_config_dump()
logger.info('test_csds attempt %d: received xDS config %s', cnt,
json.dumps(client_config, indent=2))
if client_config is not None:
# Got the xDS config dump, now validate it
ok = True
try:
if client_config['node']['locality']['zone'] != args.zone:
logger.info('Invalid zone %s != %s',
client_config['node']['locality']['zone'],
args.zone)
ok = False
seen = set()
for xds_config in client_config['xds_config']:
if 'listener_config' in xds_config:
listener_name = xds_config['listener_config'][
'dynamic_listeners'][0]['active_state']['listener'][
'name']
if listener_name != server_uri:
logger.info('Invalid Listener name %s != %s',
listener_name, server_uri)
ok = False
else:
seen.add('lds')
elif 'route_config' in xds_config:
num_vh = len(
xds_config['route_config']['dynamic_route_configs']
[0]['route_config']['virtual_hosts'])
if num_vh <= 0:
logger.info('Invalid number of VirtualHosts %s',
num_vh)
ok = False
else:
seen.add('rds')
elif 'cluster_config' in xds_config:
cluster_type = xds_config['cluster_config'][
'dynamic_active_clusters'][0]['cluster']['type']
if cluster_type != 'EDS':
logger.info('Invalid cluster type %s != EDS',
cluster_type)
ok = False
else:
seen.add('cds')
elif 'endpoint_config' in xds_config:
sub_zone = xds_config["endpoint_config"][
"dynamic_endpoint_configs"][0]["endpoint_config"][
"endpoints"][0]["locality"]["sub_zone"]
if args.zone not in sub_zone:
logger.info('Invalid endpoint sub_zone %s',
sub_zone)
ok = False
else:
seen.add('eds')
want = {'lds', 'rds', 'cds', 'eds'}
if seen != want:
logger.info('Incomplete xDS config dump, seen=%s', seen)
ok = False
            except Exception:
logger.exception('Error in xDS config dump:')
ok = False
finally:
if ok:
                    # Successfully fetched the xDS config, and it looks good.
logger.info('success')
return
logger.info('test_csds attempt %d failed', cnt)
# Give the client some time to fetch xDS resources
time.sleep(sleep_interval_between_attempts_s)
cnt += 1
raise RuntimeError('failed to receive a valid xDS config in %s seconds' %
test_csds_timeout_s)
def set_validate_for_proxyless(gcp, validate_for_proxyless):
if not gcp.alpha_compute:
logger.debug(
            'Not setting validateForProxyless because alpha is not enabled')
return
    # This function deletes global_forwarding_rule and target_proxy, then
    # recreates target_proxy with the requested validateForProxyless setting.
    # This is necessary because patching target_grpc_proxy isn't supported.
delete_global_forwarding_rule(gcp)
delete_target_proxy(gcp)
create_target_proxy(gcp, gcp.target_proxy.name, validate_for_proxyless)
create_global_forwarding_rule(gcp, gcp.global_forwarding_rule.name,
[gcp.service_port])
def get_serving_status(instance, service_port):
with grpc.insecure_channel('%s:%d' % (instance, service_port)) as channel:
health_stub = health_pb2_grpc.HealthStub(channel)
return health_stub.Check(health_pb2.HealthCheckRequest())
def set_serving_status(instances, service_port, serving):
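    '''Sets the test servers' serving status via XdsUpdateHealthService and
    retries until each server's health check reports the requested state.
    '''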
logger.info('setting %s serving status to %s', instances, serving)
for instance in instances:
with grpc.insecure_channel('%s:%d' %
(instance, service_port)) as channel:
logger.info('setting %s serving status to %s', instance, serving)
stub = test_pb2_grpc.XdsUpdateHealthServiceStub(channel)
retry_count = 5
            for i in range(retry_count):
if serving:
stub.SetServing(empty_pb2.Empty())
else:
stub.SetNotServing(empty_pb2.Empty())
serving_status = get_serving_status(instance, service_port)
logger.info('got instance service status %s', serving_status)
want_status = health_pb2.HealthCheckResponse.SERVING if serving else health_pb2.HealthCheckResponse.NOT_SERVING
if serving_status.status == want_status:
break
if i == retry_count - 1:
raise Exception(
'failed to set instance service status after %d retries'
% retry_count)
def is_primary_instance_group(gcp, instance_group):
    # The client may be connected to a TD instance in a different region than
    # the client itself, in which case primary/secondary assignments may not be
    # based on the client's actual locality.
instance_names = get_instance_names(gcp, instance_group)
stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
return all(
peer in instance_names for peer in list(stats.rpcs_by_peer.keys()))
def get_startup_script(path_to_server_binary, service_port):
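    '''Returns the VM startup script: runs a prebuilt test server binary if a
    path is provided, otherwise builds and runs the grpc-java xds-test-server.
    '''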
if path_to_server_binary:
return 'nohup %s --port=%d 1>/dev/null &' % (path_to_server_binary,
service_port)
else:
return """#!/bin/bash
sudo apt update
sudo apt install -y git default-jdk
mkdir java_server
pushd java_server
git clone https://github.com/grpc/grpc-java.git
pushd grpc-java
pushd interop-testing
../gradlew installDist -x test -PskipCodegen=true -PskipAndroid=true
nohup build/install/grpc-interop-testing/bin/xds-test-server \
--port=%d 1>/dev/null &""" % service_port
def create_instance_template(gcp, name, network, source_image, machine_type,
startup_script):
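    '''Creates a GCE instance template tagged for health checks, using the
    given startup script, and records it as gcp.instance_template.
    '''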
config = {
'name': name,
'properties': {
'tags': {
'items': ['allow-health-checks']
},
'machineType': machine_type,
'serviceAccounts': [{
'email': 'default',
'scopes': ['https://www.googleapis.com/auth/cloud-platform',]
}],
'networkInterfaces': [{
'accessConfigs': [{
'type': 'ONE_TO_ONE_NAT'
}],
'network': network
}],
'disks': [{
'boot': True,
'initializeParams': {
'sourceImage': source_image
},
'autoDelete': True
}],
'metadata': {
'items': [{
'key': 'startup-script',
'value': startup_script
}]
}
}
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.instanceTemplates().insert(
project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.instance_template = GcpResource(config['name'], result['targetLink'])
def add_instance_group(gcp, zone, name, size):
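    '''Creates a managed instance group of the given size from the stored
    instance template and waits for it to reach the expected size.
    '''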
config = {
'name': name,
'instanceTemplate': gcp.instance_template.url,
'targetSize': size,
'namedPorts': [{
'name': 'grpc',
'port': gcp.service_port
}]
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.instanceGroupManagers().insert(
project=gcp.project, zone=zone,
body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_zone_operation(gcp, zone, result['name'])
result = gcp.compute.instanceGroupManagers().get(
project=gcp.project, zone=zone,
instanceGroupManager=config['name']).execute(
num_retries=_GCP_API_RETRIES)
instance_group = InstanceGroup(config['name'], result['instanceGroup'],
zone)
gcp.instance_groups.append(instance_group)
wait_for_instance_group_to_reach_expected_size(gcp, instance_group, size,
_WAIT_FOR_OPERATION_SEC)
return instance_group
def create_health_check(gcp, name):
if gcp.alpha_compute:
config = {
'name': name,
'type': 'GRPC',
'grpcHealthCheck': {
'portSpecification': 'USE_SERVING_PORT'
}
}
compute_to_use = gcp.alpha_compute
else:
config = {
'name': name,
'type': 'TCP',
'tcpHealthCheck': {
'portName': 'grpc'
}
}
compute_to_use = gcp.compute
logger.debug('Sending GCP request with body=%s', config)
result = compute_to_use.healthChecks().insert(
project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.health_check = GcpResource(config['name'], result['targetLink'])
def create_health_check_firewall_rule(gcp, name):
config = {
'name': name,
'direction': 'INGRESS',
'allowed': [{
'IPProtocol': 'tcp'
}],
'sourceRanges': ['35.191.0.0/16', '130.211.0.0/22'],
'targetTags': ['allow-health-checks'],
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.firewalls().insert(
project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.health_check_firewall_rule = GcpResource(config['name'],
result['targetLink'])
def add_backend_service(gcp, name):
if gcp.alpha_compute:
protocol = 'GRPC'
compute_to_use = gcp.alpha_compute
else:
protocol = 'HTTP2'
compute_to_use = gcp.compute
config = {
'name': name,
'loadBalancingScheme': 'INTERNAL_SELF_MANAGED',
'healthChecks': [gcp.health_check.url],
'portName': 'grpc',
'protocol': protocol
}
logger.debug('Sending GCP request with body=%s', config)
result = compute_to_use.backendServices().insert(
project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
backend_service = GcpResource(config['name'], result['targetLink'])
gcp.backend_services.append(backend_service)
return backend_service
def create_url_map(gcp, name, backend_service, host_name):
config = {
'name': name,
'defaultService': backend_service.url,
'pathMatchers': [{
'name': _PATH_MATCHER_NAME,
'defaultService': backend_service.url,
}],
'hostRules': [{
'hosts': [host_name],
'pathMatcher': _PATH_MATCHER_NAME
}]
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.urlMaps().insert(
project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.url_map = GcpResource(config['name'], result['targetLink'])
def patch_url_map_host_rule_with_port(gcp, name, backend_service, host_name):
config = {
'hostRules': [{
'hosts': ['%s:%d' % (host_name, gcp.service_port)],
'pathMatcher': _PATH_MATCHER_NAME
}]
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.urlMaps().patch(
project=gcp.project, urlMap=name,
body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
def create_target_proxy(gcp, name, validate_for_proxyless=True):
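    '''Creates a target gRPC proxy (alpha, with optional proxyless validation)
    or a target HTTP proxy pointing at the url map, and records it as
    gcp.target_proxy.
    '''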
if gcp.alpha_compute:
config = {
'name': name,
'url_map': gcp.url_map.url,
'validate_for_proxyless': validate_for_proxyless
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.alpha_compute.targetGrpcProxies().insert(
project=gcp.project,
body=config).execute(num_retries=_GCP_API_RETRIES)
else:
config = {
'name': name,
'url_map': gcp.url_map.url,
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.targetHttpProxies().insert(
project=gcp.project,
body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.target_proxy = GcpResource(config['name'], result['targetLink'])
def create_global_forwarding_rule(gcp,
name,
potential_ports,
potential_ip_addresses=['0.0.0.0']):
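    '''Tries each (port, IP address) combination until a global forwarding rule
    is created successfully, then records the rule and the chosen service port.
    '''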
if gcp.alpha_compute:
compute_to_use = gcp.alpha_compute
else:
compute_to_use = gcp.compute
for port in potential_ports:
for ip_address in potential_ip_addresses:
try:
config = {
'name': name,
'loadBalancingScheme': 'INTERNAL_SELF_MANAGED',
'portRange': str(port),
'IPAddress': ip_address,
'network': args.network,
'target': gcp.target_proxy.url,
}
logger.debug('Sending GCP request with body=%s', config)
result = compute_to_use.globalForwardingRules().insert(
project=gcp.project,
body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.global_forwarding_rule = GcpResource(
config['name'], result['targetLink'])
gcp.service_port = port
return
except googleapiclient.errors.HttpError as http_error:
logger.warning(
'Got error %s when attempting to create forwarding rule to '
'%s:%d. Retrying with another port.' %
(http_error, ip_address, port))
def get_health_check(gcp, health_check_name):
try:
result = gcp.compute.healthChecks().get(
project=gcp.project, healthCheck=health_check_name).execute()
gcp.health_check = GcpResource(health_check_name, result['selfLink'])
except Exception as e:
gcp.errors.append(e)
gcp.health_check = GcpResource(health_check_name, None)
def get_health_check_firewall_rule(gcp, firewall_name):
try:
result = gcp.compute.firewalls().get(project=gcp.project,
firewall=firewall_name).execute()
gcp.health_check_firewall_rule = GcpResource(firewall_name,
result['selfLink'])
except Exception as e:
gcp.errors.append(e)
gcp.health_check_firewall_rule = GcpResource(firewall_name, None)
def get_backend_service(gcp, backend_service_name, record_error=True):
try:
result = gcp.compute.backendServices().get(
project=gcp.project, backendService=backend_service_name).execute()
backend_service = GcpResource(backend_service_name, result['selfLink'])
except Exception as e:
if record_error:
gcp.errors.append(e)
backend_service = GcpResource(backend_service_name, None)
gcp.backend_services.append(backend_service)
return backend_service
def get_url_map(gcp, url_map_name):
try:
result = gcp.compute.urlMaps().get(project=gcp.project,
urlMap=url_map_name).execute()
gcp.url_map = GcpResource(url_map_name, result['selfLink'])
except Exception as e:
gcp.errors.append(e)
gcp.url_map = GcpResource(url_map_name, None)
def get_target_proxy(gcp, target_proxy_name):
try:
if gcp.alpha_compute:
result = gcp.alpha_compute.targetGrpcProxies().get(
project=gcp.project,
targetGrpcProxy=target_proxy_name).execute()
else:
result = gcp.compute.targetHttpProxies().get(
project=gcp.project,
targetHttpProxy=target_proxy_name).execute()
gcp.target_proxy = GcpResource(target_proxy_name, result['selfLink'])
except Exception as e:
gcp.errors.append(e)
gcp.target_proxy = GcpResource(target_proxy_name, None)
def get_global_forwarding_rule(gcp, forwarding_rule_name):
try:
result = gcp.compute.globalForwardingRules().get(
project=gcp.project, forwardingRule=forwarding_rule_name).execute()
gcp.global_forwarding_rule = GcpResource(forwarding_rule_name,
result['selfLink'])
except Exception as e:
gcp.errors.append(e)
gcp.global_forwarding_rule = GcpResource(forwarding_rule_name, None)
def get_instance_template(gcp, template_name):
try:
result = gcp.compute.instanceTemplates().get(
project=gcp.project, instanceTemplate=template_name).execute()
gcp.instance_template = GcpResource(template_name, result['selfLink'])
except Exception as e:
gcp.errors.append(e)
gcp.instance_template = GcpResource(template_name, None)
def get_instance_group(gcp, zone, instance_group_name):
try:
result = gcp.compute.instanceGroups().get(
project=gcp.project, zone=zone,
instanceGroup=instance_group_name).execute()
gcp.service_port = result['namedPorts'][0]['port']
instance_group = InstanceGroup(instance_group_name, result['selfLink'],
zone)
except Exception as e:
gcp.errors.append(e)
instance_group = InstanceGroup(instance_group_name, None, zone)
gcp.instance_groups.append(instance_group)
return instance_group
def delete_global_forwarding_rule(gcp, name=None):
if name:
forwarding_rule_to_delete = name
else:
forwarding_rule_to_delete = gcp.global_forwarding_rule.name
try:
logger.debug('Deleting forwarding rule %s', forwarding_rule_to_delete)
result = gcp.compute.globalForwardingRules().delete(
project=gcp.project,
forwardingRule=forwarding_rule_to_delete).execute(
num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
def delete_target_proxy(gcp, name=None):
if name:
proxy_to_delete = name
else:
proxy_to_delete = gcp.target_proxy.name
try:
if gcp.alpha_compute:
logger.debug('Deleting grpc proxy %s', proxy_to_delete)
result = gcp.alpha_compute.targetGrpcProxies().delete(
project=gcp.project, targetGrpcProxy=proxy_to_delete).execute(
num_retries=_GCP_API_RETRIES)
else:
logger.debug('Deleting http proxy %s', proxy_to_delete)
result = gcp.compute.targetHttpProxies().delete(
project=gcp.project, targetHttpProxy=proxy_to_delete).execute(
num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
def delete_url_map(gcp, name=None):
if name:
url_map_to_delete = name
else:
url_map_to_delete = gcp.url_map.name
try:
logger.debug('Deleting url map %s', url_map_to_delete)
result = gcp.compute.urlMaps().delete(
project=gcp.project,
urlMap=url_map_to_delete).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
def delete_backend_service(gcp, backend_service):
try:
logger.debug('Deleting backend service %s', backend_service.name)
result = gcp.compute.backendServices().delete(
project=gcp.project, backendService=backend_service.name).execute(
num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
def delete_backend_services(gcp):
for backend_service in gcp.backend_services:
delete_backend_service(gcp, backend_service)
def delete_firewall(gcp):
try:
logger.debug('Deleting firewall %s',
gcp.health_check_firewall_rule.name)
result = gcp.compute.firewalls().delete(
project=gcp.project,
firewall=gcp.health_check_firewall_rule.name).execute(
num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
def delete_health_check(gcp):
try:
logger.debug('Deleting health check %s', gcp.health_check.name)
result = gcp.compute.healthChecks().delete(
project=gcp.project, healthCheck=gcp.health_check.name).execute(
num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
def delete_instance_groups(gcp):
for instance_group in gcp.instance_groups:
try:
logger.debug('Deleting instance group %s %s', instance_group.name,
instance_group.zone)
result = gcp.compute.instanceGroupManagers().delete(
project=gcp.project,
zone=instance_group.zone,
instanceGroupManager=instance_group.name).execute(
num_retries=_GCP_API_RETRIES)
wait_for_zone_operation(gcp,
instance_group.zone,
result['name'],
timeout_sec=_WAIT_FOR_BACKEND_SEC)
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
def delete_instance_template(gcp):
try:
logger.debug('Deleting instance template %s',
gcp.instance_template.name)
result = gcp.compute.instanceTemplates().delete(
project=gcp.project,
instanceTemplate=gcp.instance_template.name).execute(
num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
def patch_backend_service(gcp,
backend_service,
instance_groups,
balancing_mode='UTILIZATION',
max_rate=1,
circuit_breakers=None):
if gcp.alpha_compute:
compute_to_use = gcp.alpha_compute
else:
compute_to_use = gcp.compute
config = {
'backends': [{
'group': instance_group.url,
'balancingMode': balancing_mode,
'maxRate': max_rate if balancing_mode == 'RATE' else None
} for instance_group in instance_groups],
'circuitBreakers': circuit_breakers,
}
logger.debug('Sending GCP request with body=%s', config)
result = compute_to_use.backendServices().patch(
project=gcp.project, backendService=backend_service.name,
body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp,
result['name'],
timeout_sec=_WAIT_FOR_BACKEND_SEC)
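# Illustrative note (added for clarity; not part of the original script): the
# optional circuit_breakers argument is passed straight through as the
# 'circuitBreakers' field of the backendServices().patch() body. A
# hypothetical call, with a made-up request limit, might look like:
#
#   patch_backend_service(gcp, backend_service, [instance_group],
#                         circuit_breakers={'maxRequests': 500})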
def resize_instance_group(gcp,
instance_group,
new_size,
timeout_sec=_WAIT_FOR_OPERATION_SEC):
result = gcp.compute.instanceGroupManagers().resize(
project=gcp.project,
zone=instance_group.zone,
instanceGroupManager=instance_group.name,
size=new_size).execute(num_retries=_GCP_API_RETRIES)
wait_for_zone_operation(gcp,
instance_group.zone,
result['name'],
timeout_sec=360)
wait_for_instance_group_to_reach_expected_size(gcp, instance_group,
new_size, timeout_sec)
def patch_url_map_backend_service(gcp,
backend_service=None,
services_with_weights=None,
route_rules=None):
    '''Change the url_map's backend service.
    Only one of backend_service and services_with_weights may be non-None.
    '''
if gcp.alpha_compute:
compute_to_use = gcp.alpha_compute
else:
compute_to_use = gcp.compute
if backend_service and services_with_weights:
raise ValueError(
            'both backend_service and services_with_weights are set; only one is allowed.')
default_service = backend_service.url if backend_service else None
default_route_action = {
'weightedBackendServices': [{
'backendService': service.url,
'weight': w,
} for service, w in list(services_with_weights.items())]
} if services_with_weights else None
config = {
'pathMatchers': [{
'name': _PATH_MATCHER_NAME,
'defaultService': default_service,
'defaultRouteAction': default_route_action,
'routeRules': route_rules,
}]
}
logger.debug('Sending GCP request with body=%s', config)
result = compute_to_use.urlMaps().patch(
project=gcp.project, urlMap=gcp.url_map.name,
body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
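# Illustrative usage (added for clarity; not in the original): the two
# mutually exclusive ways to call patch_url_map_backend_service. The 80/20
# split is a made-up example value.
#
#   # Point the path matcher at a single backend service:
#   patch_url_map_backend_service(gcp, backend_service=backend_service)
#
#   # Or split traffic between two services by weight:
#   patch_url_map_backend_service(
#       gcp, services_with_weights={backend_service: 80,
#                                   alternate_backend_service: 20})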
def wait_for_instance_group_to_reach_expected_size(gcp, instance_group,
expected_size, timeout_sec):
start_time = time.time()
while True:
current_size = len(get_instance_names(gcp, instance_group))
if current_size == expected_size:
break
if time.time() - start_time > timeout_sec:
raise Exception(
'Instance group had expected size %d but actual size %d' %
(expected_size, current_size))
time.sleep(2)
def wait_for_global_operation(gcp,
operation,
timeout_sec=_WAIT_FOR_OPERATION_SEC):
start_time = time.time()
while time.time() - start_time <= timeout_sec:
result = gcp.compute.globalOperations().get(
project=gcp.project,
operation=operation).execute(num_retries=_GCP_API_RETRIES)
if result['status'] == 'DONE':
if 'error' in result:
raise Exception(result['error'])
return
time.sleep(2)
    raise Exception('Operation %s did not complete within %d seconds' %
                    (operation, timeout_sec))
def wait_for_zone_operation(gcp,
zone,
operation,
timeout_sec=_WAIT_FOR_OPERATION_SEC):
start_time = time.time()
while time.time() - start_time <= timeout_sec:
result = gcp.compute.zoneOperations().get(
project=gcp.project, zone=zone,
operation=operation).execute(num_retries=_GCP_API_RETRIES)
if result['status'] == 'DONE':
if 'error' in result:
raise Exception(result['error'])
return
time.sleep(2)
    raise Exception('Operation %s did not complete within %d seconds' %
                    (operation, timeout_sec))
def wait_for_healthy_backends(gcp,
backend_service,
instance_group,
timeout_sec=_WAIT_FOR_BACKEND_SEC):
start_time = time.time()
config = {'group': instance_group.url}
instance_names = get_instance_names(gcp, instance_group)
expected_size = len(instance_names)
while time.time() - start_time <= timeout_sec:
for instance_name in instance_names:
try:
status = get_serving_status(instance_name, gcp.service_port)
logger.info('serving status response from %s: %s',
instance_name, status)
except grpc.RpcError as rpc_error:
logger.info('checking serving status of %s failed: %s',
instance_name, rpc_error)
result = gcp.compute.backendServices().getHealth(
project=gcp.project,
backendService=backend_service.name,
body=config).execute(num_retries=_GCP_API_RETRIES)
if 'healthStatus' in result:
logger.info('received GCP healthStatus: %s', result['healthStatus'])
healthy = True
for instance in result['healthStatus']:
if instance['healthState'] != 'HEALTHY':
healthy = False
break
if healthy and expected_size == len(result['healthStatus']):
return
else:
logger.info('no healthStatus received from GCP')
time.sleep(5)
raise Exception('Not all backends became healthy within %d seconds: %s' %
(timeout_sec, result))
def get_instance_names(gcp, instance_group):
instance_names = []
result = gcp.compute.instanceGroups().listInstances(
project=gcp.project,
zone=instance_group.zone,
instanceGroup=instance_group.name,
body={
'instanceState': 'ALL'
}).execute(num_retries=_GCP_API_RETRIES)
if 'items' not in result:
return []
for item in result['items']:
# listInstances() returns the full URL of the instance, which ends with
# the instance name. compute.instances().get() requires using the
# instance name (not the full URL) to look up instance details, so we
# just extract the name manually.
instance_name = item['instance'].split('/')[-1]
instance_names.append(instance_name)
logger.info('retrieved instance names: %s', instance_names)
return instance_names
def clean_up(gcp):
if gcp.global_forwarding_rule:
delete_global_forwarding_rule(gcp)
if gcp.target_proxy:
delete_target_proxy(gcp)
if gcp.url_map:
delete_url_map(gcp)
delete_backend_services(gcp)
if gcp.health_check_firewall_rule:
delete_firewall(gcp)
if gcp.health_check:
delete_health_check(gcp)
delete_instance_groups(gcp)
if gcp.instance_template:
delete_instance_template(gcp)
class InstanceGroup(object):
def __init__(self, name, url, zone):
self.name = name
self.url = url
self.zone = zone
class GcpResource(object):
def __init__(self, name, url):
self.name = name
self.url = url
class GcpState(object):
def __init__(self, compute, alpha_compute, project, project_num):
self.compute = compute
self.alpha_compute = alpha_compute
self.project = project
self.project_num = project_num
self.health_check = None
self.health_check_firewall_rule = None
self.backend_services = []
self.url_map = None
self.target_proxy = None
self.global_forwarding_rule = None
self.service_port = None
self.instance_template = None
self.instance_groups = []
self.errors = []
logging.debug(
"script start time: %s",
datetime.datetime.now(
datetime.timezone.utc).astimezone().strftime("%Y-%m-%dT%H:%M:%S %Z"))
logging.debug("logging local timezone: %s",
datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo)
alpha_compute = None
if args.compute_discovery_document:
with open(args.compute_discovery_document, 'r') as discovery_doc:
compute = googleapiclient.discovery.build_from_document(
discovery_doc.read())
if not args.only_stable_gcp_apis and args.alpha_compute_discovery_document:
with open(args.alpha_compute_discovery_document, 'r') as discovery_doc:
alpha_compute = googleapiclient.discovery.build_from_document(
discovery_doc.read())
else:
compute = googleapiclient.discovery.build('compute', 'v1')
if not args.only_stable_gcp_apis:
alpha_compute = googleapiclient.discovery.build('compute', 'alpha')
test_results = {}
failed_tests = []
try:
gcp = GcpState(compute, alpha_compute, args.project_id, args.project_num)
gcp_suffix = args.gcp_suffix
health_check_name = _BASE_HEALTH_CHECK_NAME + gcp_suffix
if not args.use_existing_gcp_resources:
if args.keep_gcp_resources:
# Auto-generating a unique suffix in case of conflict should not be
# combined with --keep_gcp_resources, as the suffix actually used
# for GCP resources will not match the provided --gcp_suffix value.
num_attempts = 1
else:
num_attempts = 5
for i in range(num_attempts):
try:
logger.info('Using GCP suffix %s', gcp_suffix)
create_health_check(gcp, health_check_name)
break
except googleapiclient.errors.HttpError as http_error:
gcp_suffix = '%s-%04d' % (gcp_suffix, random.randint(0, 9999))
health_check_name = _BASE_HEALTH_CHECK_NAME + gcp_suffix
logger.exception('HttpError when creating health check')
if gcp.health_check is None:
raise Exception('Failed to create health check name after %d '
'attempts' % num_attempts)
firewall_name = _BASE_FIREWALL_RULE_NAME + gcp_suffix
backend_service_name = _BASE_BACKEND_SERVICE_NAME + gcp_suffix
alternate_backend_service_name = _BASE_BACKEND_SERVICE_NAME + '-alternate' + gcp_suffix
extra_backend_service_name = _BASE_BACKEND_SERVICE_NAME + '-extra' + gcp_suffix
more_extra_backend_service_name = _BASE_BACKEND_SERVICE_NAME + '-more-extra' + gcp_suffix
url_map_name = _BASE_URL_MAP_NAME + gcp_suffix
service_host_name = _BASE_SERVICE_HOST + gcp_suffix
target_proxy_name = _BASE_TARGET_PROXY_NAME + gcp_suffix
forwarding_rule_name = _BASE_FORWARDING_RULE_NAME + gcp_suffix
template_name = _BASE_TEMPLATE_NAME + gcp_suffix
instance_group_name = _BASE_INSTANCE_GROUP_NAME + gcp_suffix
same_zone_instance_group_name = _BASE_INSTANCE_GROUP_NAME + '-same-zone' + gcp_suffix
secondary_zone_instance_group_name = _BASE_INSTANCE_GROUP_NAME + '-secondary-zone' + gcp_suffix
potential_service_ports = list(args.service_port_range)
random.shuffle(potential_service_ports)
if args.use_existing_gcp_resources:
logger.info('Reusing existing GCP resources')
get_health_check(gcp, health_check_name)
get_health_check_firewall_rule(gcp, firewall_name)
backend_service = get_backend_service(gcp, backend_service_name)
alternate_backend_service = get_backend_service(
gcp, alternate_backend_service_name)
extra_backend_service = get_backend_service(gcp,
extra_backend_service_name,
record_error=False)
more_extra_backend_service = get_backend_service(
gcp, more_extra_backend_service_name, record_error=False)
get_url_map(gcp, url_map_name)
get_target_proxy(gcp, target_proxy_name)
get_global_forwarding_rule(gcp, forwarding_rule_name)
get_instance_template(gcp, template_name)
instance_group = get_instance_group(gcp, args.zone, instance_group_name)
same_zone_instance_group = get_instance_group(
gcp, args.zone, same_zone_instance_group_name)
secondary_zone_instance_group = get_instance_group(
gcp, args.secondary_zone, secondary_zone_instance_group_name)
if gcp.errors:
raise Exception(gcp.errors)
else:
create_health_check_firewall_rule(gcp, firewall_name)
backend_service = add_backend_service(gcp, backend_service_name)
alternate_backend_service = add_backend_service(
gcp, alternate_backend_service_name)
create_url_map(gcp, url_map_name, backend_service, service_host_name)
create_target_proxy(gcp, target_proxy_name)
create_global_forwarding_rule(gcp, forwarding_rule_name,
potential_service_ports)
if not gcp.service_port:
raise Exception(
'Failed to find a valid ip:port for the forwarding rule')
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(gcp, url_map_name,
backend_service,
service_host_name)
startup_script = get_startup_script(args.path_to_server_binary,
gcp.service_port)
create_instance_template(gcp, template_name, args.network,
args.source_image, args.machine_type,
startup_script)
instance_group = add_instance_group(gcp, args.zone, instance_group_name,
_INSTANCE_GROUP_SIZE)
patch_backend_service(gcp, backend_service, [instance_group])
same_zone_instance_group = add_instance_group(
gcp, args.zone, same_zone_instance_group_name, _INSTANCE_GROUP_SIZE)
secondary_zone_instance_group = add_instance_group(
gcp, args.secondary_zone, secondary_zone_instance_group_name,
_INSTANCE_GROUP_SIZE)
wait_for_healthy_backends(gcp, backend_service, instance_group)
if args.test_case:
client_env = dict(os.environ)
if original_grpc_trace:
client_env['GRPC_TRACE'] = original_grpc_trace
if original_grpc_verbosity:
client_env['GRPC_VERBOSITY'] = original_grpc_verbosity
bootstrap_server_features = []
if gcp.service_port == _DEFAULT_SERVICE_PORT:
server_uri = service_host_name
else:
server_uri = service_host_name + ':' + str(gcp.service_port)
if args.xds_v3_support:
client_env['GRPC_XDS_EXPERIMENTAL_V3_SUPPORT'] = 'true'
bootstrap_server_features.append('xds_v3')
if args.bootstrap_file:
bootstrap_path = os.path.abspath(args.bootstrap_file)
else:
with tempfile.NamedTemporaryFile(delete=False) as bootstrap_file:
bootstrap_file.write(
_BOOTSTRAP_TEMPLATE.format(
node_id='projects/%s/networks/%s/nodes/%s' %
(gcp.project_num, args.network.split('/')[-1],
uuid.uuid1()),
server_features=json.dumps(
bootstrap_server_features)).encode('utf-8'))
bootstrap_path = bootstrap_file.name
client_env['GRPC_XDS_BOOTSTRAP'] = bootstrap_path
client_env['GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING'] = 'true'
client_env['GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT'] = 'true'
client_env['GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION'] = 'true'
for test_case in args.test_case:
if test_case in _V3_TEST_CASES and not args.xds_v3_support:
logger.info('skipping test %s due to missing v3 support',
test_case)
continue
if test_case in _ALPHA_TEST_CASES and not gcp.alpha_compute:
logger.info('skipping test %s due to missing alpha support',
test_case)
continue
if test_case in [
'api_listener', 'forwarding_rule_port_match',
'forwarding_rule_default_port'
] and CLIENT_HOSTS:
logger.info(
                        'skipping test %s because test configuration is '
                        'not compatible with client processes on existing '
                        'client hosts', test_case)
continue
if test_case == 'forwarding_rule_default_port':
server_uri = service_host_name
result = jobset.JobResult()
log_dir = os.path.join(_TEST_LOG_BASE_DIR, test_case)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
test_log_filename = os.path.join(log_dir, _SPONGE_LOG_NAME)
test_log_file = open(test_log_filename, 'w+')
client_process = None
if test_case in _TESTS_TO_RUN_MULTIPLE_RPCS:
rpcs_to_send = '--rpc="UnaryCall,EmptyCall"'
else:
rpcs_to_send = '--rpc="UnaryCall"'
if test_case in _TESTS_TO_SEND_METADATA:
metadata_to_send = '--metadata="EmptyCall:{keyE}:{valueE},UnaryCall:{keyU}:{valueU},UnaryCall:{keyNU}:{valueNU}"'.format(
keyE=_TEST_METADATA_KEY,
valueE=_TEST_METADATA_VALUE_EMPTY,
keyU=_TEST_METADATA_KEY,
valueU=_TEST_METADATA_VALUE_UNARY,
keyNU=_TEST_METADATA_NUMERIC_KEY,
valueNU=_TEST_METADATA_NUMERIC_VALUE)
else:
# Setting the arg explicitly to empty with '--metadata=""'
# makes C# client fail
# (see https://github.com/commandlineparser/commandline/issues/412),
# so instead we just rely on clients using the default when
# metadata arg is not specified.
metadata_to_send = ''
# TODO(ericgribkoff) Temporarily disable fail_on_failed_rpc checks
# in the client. This means we will ignore intermittent RPC
# failures (but this framework still checks that the final result
# is as expected).
#
# Reason for disabling this is, the resources are shared by
# multiple tests, and a change in previous test could be delayed
# until the second test starts. The second test may see
# intermittent failures because of that.
#
# A fix is to not share resources between tests (though that does
# mean the tests will be significantly slower due to creating new
# resources).
fail_on_failed_rpc = ''
try:
if not CLIENT_HOSTS:
client_cmd_formatted = args.client_cmd.format(
server_uri=server_uri,
stats_port=args.stats_port,
qps=args.qps,
fail_on_failed_rpc=fail_on_failed_rpc,
rpcs_to_send=rpcs_to_send,
metadata_to_send=metadata_to_send)
logger.debug('running client: %s', client_cmd_formatted)
client_cmd = shlex.split(client_cmd_formatted)
client_process = subprocess.Popen(client_cmd,
env=client_env,
stderr=subprocess.STDOUT,
stdout=test_log_file)
if test_case == 'backends_restart':
test_backends_restart(gcp, backend_service, instance_group)
elif test_case == 'change_backend_service':
test_change_backend_service(gcp, backend_service,
instance_group,
alternate_backend_service,
same_zone_instance_group)
elif test_case == 'gentle_failover':
test_gentle_failover(gcp, backend_service, instance_group,
secondary_zone_instance_group)
elif test_case == 'load_report_based_failover':
test_load_report_based_failover(
gcp, backend_service, instance_group,
secondary_zone_instance_group)
elif test_case == 'ping_pong':
test_ping_pong(gcp, backend_service, instance_group)
elif test_case == 'remove_instance_group':
test_remove_instance_group(gcp, backend_service,
instance_group,
same_zone_instance_group)
elif test_case == 'round_robin':
test_round_robin(gcp, backend_service, instance_group)
elif test_case == 'secondary_locality_gets_no_requests_on_partial_primary_failure':
test_secondary_locality_gets_no_requests_on_partial_primary_failure(
gcp, backend_service, instance_group,
secondary_zone_instance_group)
elif test_case == 'secondary_locality_gets_requests_on_primary_failure':
test_secondary_locality_gets_requests_on_primary_failure(
gcp, backend_service, instance_group,
secondary_zone_instance_group)
elif test_case == 'traffic_splitting':
test_traffic_splitting(gcp, backend_service, instance_group,
alternate_backend_service,
same_zone_instance_group)
elif test_case == 'path_matching':
test_path_matching(gcp, backend_service, instance_group,
alternate_backend_service,
same_zone_instance_group)
elif test_case == 'header_matching':
test_header_matching(gcp, backend_service, instance_group,
alternate_backend_service,
same_zone_instance_group)
elif test_case == 'circuit_breaking':
test_circuit_breaking(gcp, backend_service, instance_group,
same_zone_instance_group)
elif test_case == 'timeout':
test_timeout(gcp, backend_service, instance_group)
elif test_case == 'fault_injection':
test_fault_injection(gcp, backend_service, instance_group)
elif test_case == 'api_listener':
server_uri = test_api_listener(gcp, backend_service,
instance_group,
alternate_backend_service)
elif test_case == 'forwarding_rule_port_match':
server_uri = test_forwarding_rule_port_match(
gcp, backend_service, instance_group)
elif test_case == 'forwarding_rule_default_port':
server_uri = test_forwarding_rule_default_port(
gcp, backend_service, instance_group)
elif test_case == 'metadata_filter':
test_metadata_filter(gcp, backend_service, instance_group,
alternate_backend_service,
same_zone_instance_group)
elif test_case == 'csds':
test_csds(gcp, backend_service, instance_group, server_uri)
else:
logger.error('Unknown test case: %s', test_case)
sys.exit(1)
if client_process and client_process.poll() is not None:
raise Exception(
'Client process exited prematurely with exit code %d' %
client_process.returncode)
result.state = 'PASSED'
result.returncode = 0
except Exception as e:
logger.exception('Test case %s failed', test_case)
failed_tests.append(test_case)
result.state = 'FAILED'
result.message = str(e)
if args.halt_after_fail:
# Stop the test suite if one case failed.
raise
finally:
if client_process:
if client_process.returncode:
logger.info('Client exited with code %d' %
client_process.returncode)
else:
client_process.terminate()
test_log_file.close()
# Workaround for Python 3, as report_utils will invoke decode() on
# result.message, which has a default value of ''.
result.message = result.message.encode('UTF-8')
test_results[test_case] = [result]
if args.log_client_output:
logger.info('Client output:')
with open(test_log_filename, 'r') as client_output:
logger.info(client_output.read())
if not os.path.exists(_TEST_LOG_BASE_DIR):
os.makedirs(_TEST_LOG_BASE_DIR)
report_utils.render_junit_xml_report(test_results,
os.path.join(
_TEST_LOG_BASE_DIR,
_SPONGE_XML_NAME),
suite_name='xds_tests',
multi_target=True)
if failed_tests:
logger.error('Test case(s) %s failed', failed_tests)
sys.exit(1)
finally:
keep_resources = args.keep_gcp_resources
if args.halt_after_fail and failed_tests:
logger.info(
'Halt after fail triggered, exiting without cleaning up resources')
keep_resources = True
if not keep_resources:
logger.info('Cleaning up GCP resources. This may take some time.')
clean_up(gcp)
| nicolasnoble/grpc | tools/run_tests/run_xds_tests.py | Python | apache-2.0 | 148,710 |
#-*- coding: utf-8 -*-
'''
Created on Jul 4, 2013
@author: jin
'''
from django.contrib import admin
from apps.agent.models import Client, RecommendRecord
class ClientAdmin(admin.ModelAdmin):
search_fields = ('username','user_username','IDCard')
class RecommendRecordAdmin(admin.ModelAdmin):
search_fields = ('user_username','client_username')
admin.site.register(Client,ClientAdmin)
admin.site.register(RecommendRecord,RecommendRecordAdmin)
| SnailJin/house | apps/agent/admin.py | Python | apache-2.0 | 461 |
#!/usr/bin/env python
"""
Created by _UserName_
11/28/2013
"""
import Connect, DB
import uuid
def Register():
    # Generate a unique identifier
ident = uuid.uuid4().hex
# Allow For Database Manipulation
database = DB.DB()
# Create Database
database.create()
# Add Self To Database
externalIP = "127.0.0.1"
database.personalInfo(ident, externalIP)
| Us3rNam/F2F-Network | Register.py | Python | artistic-2.0 | 398 |
"""
hmmer module
"""
from __future__ import print_function
from mungo.mungoCore import *
import blast, sequence
from mungo.useful import smartopen, extractRootName, ClassFromDict, warnDeprecated
import sys, re, warnings
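# HMMER numbers the six reading frames 0-5; this maps them to signed
# biological frames (+1..+3 forward, -1..-3 reverse), and frame2hmmer is the
# inverse lookup.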
hmmer2frame = {0: 1, 1: 2, 2: 3, 3: -1, 4: -2, 5: -3}
frame2hmmer = dict([(v,k) for k,v in hmmer2frame.iteritems()])
def HmmerFile(iFileHandle, **kw):
"Factory function returning a HmmerFileReader"
return HmmerReader(iFileHandle, **kw)
class HmmerReader(AbstractDataReader):
def __init__(self, iFileHandle, seqType=None, eValueCutoff=None, scoreCutoff=None):
super(HmmerReader, self).__init__(iFileHandle)
self.seqType = seqType
self.eValueCutoff = eValueCutoff
self.scoreCutoff = scoreCutoff
def _generator(self):
"""Return an iterator to a HMMer file."""
if self.seqType in [Domain, SixFrameDomain, BlockSixFrameDomain, OrfDomain, OrfDomain2]:
_Domain = self.seqType
elif self.seqType=='SixFrame':
_Domain = SixFrameDomain
elif self.seqType=='BlockSixFrame':
_Domain = BlockSixFrameDomain
elif self.seqType=='ORFs':
_Domain = OrfDomain
else:
_Domain = Domain
startToken = '^Parsed for domains'
endToken = '^Alignments of top-scoring domains'
abortToken = '\[no hits above thresholds\]'
startRegex = re.compile(startToken)
if not jumpToMatch(self.iFile, startRegex):
raise Exception('No match found. File may be empty.')
# 3. Parse domain details
line = self.iFile.next()
line = self.iFile.next()
endRegex = re.compile(endToken)
abortRegex = re.compile(abortToken)
domains = []
for line in self.iFile:
line = line.strip()
if endRegex.match(line) or abortRegex.match(line):
break
elif not line:
continue
tokens = line.split()
d = _Domain(dict(zip(Domain.attributes[1:], tokens)))
if (self.eValueCutoff and d.eValue>self.eValueCutoff) or \
(self.scoreCutoff and d.score<self.scoreCutoff): continue
yield d
class PfamReader(AbstractDataReader):
def __init__(self, iFileHandle, eValueCutoff=None, scoreCutoff=None):
super(PfamReader, self).__init__(iFileHandle)
self.eValueCutoff = eValueCutoff
self.scoreCutoff = scoreCutoff
def _generator(self):
pass
class Domain(AbstractFeature):
"""Domain feature class"""
attributes = ['domain', 'accession', 'count', 'sStart', 'sEnd',
'sCode', 'qStart', 'qEnd', 'qCode', 'score', 'eValue']
converters = zip(
['qStart','qEnd','sStart','sEnd','score','eValue'],
[int,int,int,int,float,float])
format = attributesToFormat(attributes)
def __init__(self, *args, **kw):
"""Constructor:
@param args: HMMer field values
@type args: list, dict, Domain
Optional keywords:
@keyword domain: Domain name
@keyword accession: Subject name
@keyword count: Id/total hits on subject
@keyword sStart:
@keyword sEnd:
@keyword sCode:
@keyword qStart:
@keyword qEnd:
@keyword qCode:
@keyword score: Bit score
@keyword eValue:
"""
super(Domain, self).__init__(*args, **kw)
self.genomic = False
def __repr__(self):
d = {}
for k,v in self.__dict__.iteritems():
d[k] = v
return self.format % d
def getTokens(self):
return [self.__dict__[key] for key in self.attributes]
def addAttribute(self, attribute, default=None):
self.attributes.append(attribute)
self.format = self.format + '\t%%(%s)s' % attribute
self.__dict__[attribute] = default
def addStrandAttribute(self, strand=None):
self.addAttribute('strand', strand)
def swapStartEnd(self):
if self.sStart>self.sEnd:
self.sStart,self.sEnd = self.sEnd,self.sStart
def getSequence(self, blastdb, getAll=False, convertAccession=lambda x: x):
if getAll:
start = 0
end = 0
else:
start = self.sStart
end = self.sEnd
accession = convertAccession(self.accession)
h,s = blast.getSequence(blastdb, accession, start, end)
return h,s
@staticmethod
def fromGenomic(tokens):
strand = tokens[-1]
d = Domain(tokens[0:-1])
d.genomic = True
d.addStrandAttribute(strand)
return d
class OrfDomain(Domain):
def toGenomic(self, orfStart, orfStrand, doSwapStartEnd=True):
"""Convert from ORF to genomic coordinates."""
self.genomic = True
self.sStart,self.sEnd = convertOrfToGenomic(
self.sStart, self.sEnd, orfStrand, orfStart)
self.addStrandAttribute(orfStrand)
if doSwapStartEnd:
self.swapStartEnd()
class OrfDomain2(Domain):
"ORF domain class for use with my ORF files"
def toGenomic(self, doSwapStartEnd=True):
"""Convert from ORF to genomic coordinates."""
self.genomic = True
o = parseOrfHeader(self.accession)
self.sStart,self.sEnd = convertOrfToGenomic(
self.sStart, self.sEnd, o.strand, o.start)
self.addStrandAttribute(o.strand)
if doSwapStartEnd:
self.swapStartEnd()
class SixFrameDomain(Domain):
def toGenomic(self, L, doSwapStartEnd=True):
"""Convert from 6 frame to genomic coordinates.
@param L: Length of DNA sequence.
"""
self.genomic = True
o = parseSixFrameHeader(self.accession)
self.sStart,self.sEnd = convertSixFrameToGenomic(
self.sStart, self.sEnd, o.frame, L)
self.accession = o.name
self.strand = o.strand
self.addStrandAttribute(o.strand)
if doSwapStartEnd:
self.swapStartEnd()
def toBlockCoords(self, L=1e99, blockSize=5000000, delimiter='.'):
self.accession, self.sStart, self.sEnd = \
blast.genomeToBlock(
self.accession, self.sStart, self.sEnd, L=L,
blockSize=blockSize, delimiter=delimiter)
def getSequenceFromString(self, seq):
s = seq[self.sStart-1:self.sEnd]
if self.strand=='-':
s = sequence.reverseComplement(s)
return s
def getSequence(self, blastDb, padFivePrime=0, padThreePrime=0):
if self.genomic:
start = max(1,self.sStart-padFivePrime)
end = self.sEnd+padThreePrime
h,s = blast.getSequence(blastDb, self.accession, start, end, self.strand)
else:
raise Exception('You must call the toGenomic method first.')
return h,s
class BlockSixFrameDomain(Domain):
def toGenomic(self, relative=False, doSwapStartEnd=True, relDelimiter=':'):
"""Convert from 6 frame to genomic coordinates."""
self.genomic = True
chrom,blockStart,blockEnd,gStart,gEnd,strand = \
convertBlockSixFrameToGenomic(
self.accession, self.sStart, self.sEnd)
if relative:
self.accession = '%s%s%i-%i' % (chrom,relDelimiter,blockStart,blockEnd)
self.sStart = gStart
self.sEnd = gEnd
else:
self.accession = chrom
self.sStart = blockStart + gStart - 1
self.sEnd = blockStart + gEnd - 1
self.addStrandAttribute(strand)
if doSwapStartEnd:
self.swapStartEnd()
class GenomicDomain(AbstractFeature):
"""GenomicDomain feature class"""
attributes = ['domain', 'accession', 'count', 'sStart', 'sEnd',
'sCode', 'qStart', 'qEnd', 'qCode', 'score', 'eValue']
converters = zip(
['qStart','qEnd','sStart','sEnd','score','eValue'],
[int,int,int,int,float,float])
format = attributesToFormat(attributes)
def __init__(self, *args, **kw):
"""Constructor:
@param args: HMMer field values
@type args: list, dict, Domain
Optional keywords:
@keyword domain: Domain name
@keyword accession: Subject name
@keyword count: Id/total hits on subject
@keyword sStart:
@keyword sEnd:
@keyword sCode:
@keyword qStart:
@keyword qEnd:
@keyword qCode:
@keyword score: Bit score
@keyword eValue:
@keyword strand:
"""
super(GenomicDomain, self).__init__(*args, **kw)
def toDict(self):
return self.__dict__
def toList(self):
return self.__dict__.items()
def __repr__(self):
try:
d = {}
for k,v in self.__dict__.iteritems():
d[k] = v
return self.format % d
except:
return str(self.__dict__)
def toBlockCoords(self, L=1e99, blockSize=5000000, delimiter='.'):
self.accession, self.sStart, self.sEnd = \
blast.genomeToBlock(
self.accession, self.sStart, self.sEnd, L=L,
blockSize=blockSize, delimiter=delimiter)
def getSequence(self, blastDb, padFivePrime=0, padThreePrime=0):
start = max(1,self.sStart-padFivePrime)
end = self.sEnd+padThreePrime
h,s = blast.getSequence(blastDb, self.accession, start, end, self.strand)
return h,s
def loadGenomicDomains(filename):
data = []
gene = []
for line in open(filename):
line = line.strip()
if not line:
continue
elif line[0] in ['#', '>']:
if gene:
data.append(gene)
gene = []
else:
tokens = line.split('\t')
d = GenomicDomain(tokens)
gene.append(d)
data.append(gene)
return data
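# Illustrative note (not from the original source): loadGenomicDomains expects
# a plain-text file where lines beginning with '#' or '>' separate genes and
# every other non-empty line is one tab-separated GenomicDomain record in the
# attribute order defined above (domain, accession, count, sStart, sEnd,
# sCode, qStart, qEnd, qCode, score, eValue). A hypothetical two-gene file
# would therefore look like '>gene1' followed by its domain rows, then
# '>gene2' followed by its rows.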
def jumpToMatch(iFile, regex):
"""Jump to regex match in file.
@param iFile: File object
@param regex: Compiled regex object
@return: True if successful, False otherwise
"""
for line in iFile:
if regex.match(line):
return True
return False
def extractUptoMatch(iFile, regex):
"""Extract up to regex match from file.
@param iFile: File object
@param regex: Compiled regex object
@return: string
"""
block = []
for line in iFile:
if regex.match(line):
break
else:
block.append(line.rstrip())
return block
def parseSixFrameHeader(header):
"""Parse a 6 frame header (from translate or python).
@param header: Six frame header "<name>:<frame>" or "<name>.<start>-<end>:<frame>"
(assumes input frame is hmmer frame (0-5)).
@return: a simple class with attributes name, start, end, strand and frame.
"""
header = header.strip()
regex = re.compile(
'(?P<name>\w+)([\.|:](?P<start>\d+)[-|,](?P<end>\d+))?:(?P<frame>[0-5])')
rs = regex.search(header)
d = rs.groupdict()
d['frame'] = hmmer2frame[int(d['frame'])]
if d['frame']>0:
d['strand'] = '+'
else:
d['strand'] = '-'
try:
d['start'] = int(d['start'])
d['end'] = int(d['end'])
except:
pass
return ClassFromDict(d)
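# Worked example (hypothetical header, added for illustration):
#   parseSixFrameHeader('chr1.100-200:3')
# returns name='chr1', start=100, end=200, frame=-1 (hmmer frame 3 is the
# first reverse frame) and strand='-'.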
def parseOrfHeader(header):
"""Parse an ORF header (from extractORFs.py).
@param header: ORF header "<name>.<orfId>.<start>-<end> Length=<length>"
(Length optional).
@return: a simple class with attributes name, start, end, strand and length.
"""
regex = re.compile(
        '(?P<name>\w+)\.(?P<orfId>\d+)\.(?P<start>\d+)-(?P<end>\d+)(\sLength=(?P<length>\d+))?')
rs = regex.match(header.strip())
d = rs.groupdict()
try:
d['start'] = int(d['start'])
d['end'] = int(d['end'])
d['length'] = int(d['length'])
except:
pass
if d['start']>d['end']:
d['strand'] = '-'
else:
d['strand'] = '+'
return ClassFromDict(d)
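# Worked example (hypothetical header, added for illustration):
#   parseOrfHeader('chr2.7.6200-5000')
# returns name='chr2', orfId='7', start=6200, end=5000 and strand='-' because
# start > end; length stays None when no Length= field is present.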
def convertSixFrameToGenomic(start, end, frame, L):
"""Convert 6 frame coords to genomic.
@param start: Amino acid start coord
@param end: Amino acid end coord
@param frame: Frame
@param L: Nucleotide seq length
@return: (gStart, gEnd, strand)
"""
if frame>=0:
gStart = 3*(start-1)+(frame-1)+1
gEnd = 3*(end-1)+(frame-1)+3
else:
gStart = L-(3*(start-1)+abs(frame)-1)
gEnd = L-(3*(end-1)+abs(frame)+1)
return gStart,gEnd
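# Worked examples (illustrative numbers only): for a 300 bp sequence (L=300)
# a domain covering amino acids 1-10 maps to
#   convertSixFrameToGenomic(1, 10, 1, 300)  -> (1, 30)    (forward frame +1)
#   convertSixFrameToGenomic(1, 10, -1, 300) -> (300, 271) (reverse frame -1)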
def convertBlockSixFrameToGenomic(block, start, end):
"""Convenience function that takes block 6 frame coords
(block,start,end), extracts the block start/end and frame
and converts them to genomic coords
ie.
chrom.blockStart-blockEnd:frame aaStart aaEnd or
chrom:blockStart-blockEnd:frame aaStart aaEnd
--> chrom,blockStart,blockEnd,gStart,gEnd,strand
    @param block: Block accession, "<name>.<blockStart>-<blockEnd>:<frame>" or
        "<name>:<blockStart>-<blockEnd>:<frame>"
    @param start: Domain start
    @param end: Domain end
    @return: (chrom, blockStart, blockEnd, gStart, gEnd, strand)
"""
#prog = re.compile('\.|-|\:')
#tokens = prog.split(block)
#prog = re.compile("(?P<chrom>[\w]+)[.:](?P<bstart>[0-9]+)-(?P<bend>[0-9]+):(?P<frame>[0-9]+)")
#rs = prog.search(block)
#if rs:
# g = rs.groupdict()
# chrom,blockStart,blockEnd,hmmerFrame = g["chrom"],g["bstart"],g["bend"],g["frame"]
# blockStart = int(blockStart)
# blockEnd = int(blockEnd)
# hmmerFrame = int(hmmerFrame)
# L = blockEnd-blockStart+1
tokens = block.split(":")
if len(tokens)==2:
hmmerFrame = tokens[1]
tokens = tokens[0].split(".")
chrom = tokens[0]
blockStart,blockEnd = tokens[1].split("-")
elif len(tokens)==3:
chrom = tokens[0]
blockStart,blockEnd = tokens[1].split("-")
hmmerFrame = tokens[2]
else:
print(tokens, file=sys.stderr)
raise Exception("Don't know what to do")
blockStart = int(blockStart)
blockEnd = int(blockEnd)
L = blockEnd-blockStart+1
hmmerFrame = int(hmmerFrame)
frame = hmmer2frame[hmmerFrame]
if frame>0:
strand = '+'
else:
strand = '-'
gStart,gEnd = convertSixFrameToGenomic(start, end, frame, L)
return chrom,blockStart,blockEnd,gStart,gEnd,strand
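# Worked example (hypothetical accession, added for illustration): a hit at
# amino acids 1-10 of 'chr1.1-5000000:0' (hmmer frame 0, i.e. +1) gives
#   convertBlockSixFrameToGenomic('chr1.1-5000000:0', 1, 10)
#   -> ('chr1', 1, 5000000, 1, 30, '+')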
def convertGenomicToBlockCoords(domain, chrLen, blockSize=5000000, delimiter='.'):
domain.accession, domain.sStart, domain.sEnd = \
blast.genomeToBlock(
domain.accession, domain.sStart, domain.sEnd,
L=chrLen, blockSize=blockSize, delimiter=delimiter)
return domain
def convertOrfToGenomic(start, end, strand, orfStart):
"""Convert domain coordinates in ORF to genomic.
@param start: Domain start coord
@param end: Domain end coord
@param strand: Strand
@param orfStart: ORF start coord
@return: (gStart, gEnd)
"""
if strand=='+':
gStart = orfStart + 3*(start-1)
gEnd = orfStart + 3*(end-1) + 2
else:
gStart = orfStart - 3*(start-1)
gEnd = orfStart - 3*(end-1) - 2
return gStart, gEnd
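# Worked example (illustrative coordinates): a domain at amino acids 1-10 of a
# forward-strand ORF starting at genomic position 100 maps to
#   convertOrfToGenomic(1, 10, '+', 100) -> (100, 129)
# and the same domain in a reverse-strand ORF starting at 129 maps to
#   convertOrfToGenomic(1, 10, '-', 129) -> (129, 100)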
def loadDomains(iFileHandle):
"""Load hmmer domain results.
@param iFileHandle: Input file or filename
@param seqType: Type of sequence searched
[None (default), 'SixFrame', 'BlockSixFrame' or 'ORFs']
@param eValueCutoff: E-value threshold (default None)
@param scoreCutoff: Score threshold (default None)
@return: list of domains
"""
domains = []
for d in HmmerFile(iFileHandle):
domains.append(d)
return domains
| PapenfussLab/Mungo | mungo/hmmer.py | Python | artistic-2.0 | 15,959 |
#! /usr/bin/env python
"""
stats -- Prints some channel information.
disconnect -- Disconnect the bot. The bot will try to reconnect
after 60 seconds.
die -- Let the bot cease to exist.
"""
import liblo
import irc.bot
import irc.strings
from irc.client import ip_numstr_to_quad, ip_quad_to_numstr
class TestBot(irc.bot.SingleServerIRCBot):
def __init__(self, channel, nickname, server, port=6667, OSCport=57120):
irc.bot.SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname)
self.server = server
self.channel = channel
self.nickname = nickname
try:
self.target = liblo.Address(OSCport)
except liblo.AddressError, err:
print str(err)
sys.exit("OSC address error")
def on_nicknameinuse(self, c, e):
c.nick(c.get_nickname() + "_")
def on_welcome(self, c, e):
c.join(self.channel)
print "connected to:\t" + self.server
def on_privmsg(self, c, e):
self.do_command(e, e.arguments[0])
def on_pubmsg(self, c, e):
a = e.arguments[0].split(":", 1)
if len(a) > 1 and irc.strings.lower(a[0]) == irc.strings.lower(self.connection.get_nickname()):
self.do_command(e, a[1].strip())
return
def do_command(self, e, cmd):
nick = e.source.nick
c = self.connection
target = self.target
c.notice(nick, "--- Channel statistics ---")
msg = liblo.Message(self.nickname) # nickname is osc tag...
#msg.add(nick)
#~ if nick == "iow":
#~ for i in cmd:
#~ #msg.add(ord(i)) #ord: char's ascii number
#~ msg.add(i)
#~ liblo.send(target, msg)
#~ for i in cmd:
#~ msg.add(ord(i))
#~ liblo.send(target, msg)
#~ print msg
for i in cmd:
msg.add(ord(i))
liblo.send(target, msg)
if cmd == "disconnect":
self.disconnect()
elif cmd == "die":
self.die()
elif cmd == "stats":
print 'stats?'
for chname, chobj in self.channels.items():
c.notice(nick, "--- Channel statistics ---")
c.notice(nick, "Channel: " + chname)
users = chobj.users()
users.sort()
c.notice(nick, "Users: " + ", ".join(users))
opers = chobj.opers()
opers.sort()
c.notice(nick, "Opers: " + ", ".join(opers))
voiced = chobj.voiced()
voiced.sort()
c.notice(nick, "Voiced: " + ", ".join(voiced))
else:
c.notice(nick, "Not understood: " + cmd)
def main():
import sys
nickname = "p1n1"
#channel = "#mode+v"
server = "127.0.0.1"
IRCport = 6667
OSCport = 57120
print len(sys.argv)
if len(sys.argv) != 5:
print("Usage: Dtestbot <server[:port]> <channel> <nickname> <oscport>")
print("$ ./ircbot.py 127.0.0.1 \"mode+v\" jk 57124")
sys.exit(1)
s = sys.argv[1].split(":", 1)
server = s[0]
if len(s) == 2:
try:
port = int(s[1])
except ValueError:
print("Error: Erroneous port.")
sys.exit(1)
else:
port = 6667
channel = sys.argv[2]
nickname = sys.argv[3]
OSCport = sys.argv[4]
#print nickname
#bot = TestBot(channel, nickname, server, port)
bot = TestBot("#mode+v", nickname, "127.0.0.1", 6667, OSCport)
#bot = TestBot(channel, nickname, server, IRCport, OSCport)
bot.start()
print 'started...'
if __name__ == "__main__":
main()
| sonoprob/0x56 | bot/py/ircoscbot.py | Python | artistic-2.0 | 3,668 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Event.location'
db.add_column(u'calendar_event', 'location',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'Event.link_url'
db.add_column(u'calendar_event', 'link_url',
self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Event.location'
db.delete_column(u'calendar_event', 'location')
# Deleting field 'Event.link_url'
db.delete_column(u'calendar_event', 'link_url')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'calendar.dummytable': {
'Meta': {'object_name': 'DummyTable'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'calendar.event': {
'Meta': {'object_name': 'Event'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'to': u"orm['sfpirgapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_menus': ('mezzanine.pages.fields.MenusField', [], {'default': '(1, 2, 3, 4, 5)', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'link_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['calendar.EventType']", 'null': 'True', 'blank': 'True'}),
'zip_import': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'calendar.eventimage': {
'Meta': {'ordering': "('_order',)", 'object_name': 'EventImage'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'file': ('mezzanine.core.fields.FileField', [], {'max_length': '200'}),
'gallery': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': u"orm['calendar.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'calendar.eventtype': {
'Meta': {'ordering': "['name']", 'object_name': 'EventType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
u'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'sfpirgapp.category': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Category'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('sfpirgapp.fields.MyImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categorys'", 'to': u"orm['auth.User']"})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['calendar'] | orlenko/sfpirg | mezzanine/calendar/migrations/0006_auto__add_field_event_location__add_field_event_link_url.py | Python | bsd-2-clause | 12,037 |
from django.conf import settings
from django.db.models.loading import get_model
from models import MasterSetting, SettingTypes
def get(name, default=None):
try:
setting = MasterSetting.objects.get(name=name)
if setting.type == SettingTypes.INT:
return int(setting.value)
elif setting.type == SettingTypes.FLOAT:
return float(setting.value)
elif setting.type == SettingTypes.FOREIGN:
model = get_model(*setting.foreign_model.split("."))
try:
return model.objects.get(id=int(setting.value))
except model.DoesNotExist:
return default
elif setting.type == SettingTypes.CHOICES:
return setting.value
else:
return setting.value
except MasterSetting.DoesNotExist:
return default
def set(name, value):
setting = MasterSetting.objects.get(name=name)
    if setting.type == SettingTypes.INT:
        setting.value = str(int(value))
    elif setting.type == SettingTypes.FLOAT:
        setting.value = str(float(value))
elif setting.type == SettingTypes.FOREIGN:
model = get_model(*setting.foreign_model.split("."))
try:
object_ = model.objects.get(id=int(value.id))
setting.value = str(object_.id)
except model.DoesNotExist:
return None
elif setting.type == SettingTypes.CHOICES:
options_ = settings.MASTER_SETTINGS[setting.name]['options']
if value in options_:
setting.value = value
else:
raise ValueError("Available options are: %s " % str(options_))
else:
setting.value = value
setting.save()
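# Illustrative usage (the setting name 'items_per_page' is hypothetical and
# would need to be declared in settings.MASTER_SETTINGS):
#
#   from ma_settings import master_settings
#   per_page = master_settings.get('items_per_page', default=20)
#   master_settings.set('items_per_page', 50)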
def exists(name):
try:
MasterSetting.objects.get(name=name)
return True
except MasterSetting.DoesNotExist:
return False | MasterAlish/django-ma-settings | ma_settings/master_settings.py | Python | bsd-2-clause | 1,876 |
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.get_session_types_request import GetSessionTypesRequest # noqa: E501
from swagger_client.rest import ApiException
class TestGetSessionTypesRequest(unittest.TestCase):
"""GetSessionTypesRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetSessionTypesRequest(self):
"""Test GetSessionTypesRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.get_session_types_request.GetSessionTypesRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| mindbody/API-Examples | SDKs/Python/test/test_get_session_types_request.py | Python | bsd-2-clause | 1,006 |
# Copyright (c) 2003-present, Jodd Team (http://jodd.org)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
f = open('ConvertBean.java', 'r')
java = f.read()
f.close()
genStart = java.find('@@generated')
java = java[0:genStart + 11]
### -----------------------------------------------------------------
types = [
[0, 'Boolean', 'boolean', 'false'],
[2, 'Integer', 'int', '0'],
[4, 'Long', 'long', '0'],
[6, 'Float', 'float', '0'],
[8, 'Double', 'double', '0'],
[10, 'Short', 'short', '(short) 0'],
[12, 'Character', 'char', '(char) 0'],
[14, 'Byte', 'byte', '(byte) 0'],
]
template = '''
/**
* Converts value to <code>$T</code>.
*/
public $T to$T(Object value) {
return ($T) typeConverters[#].convert(value);
}
/**
* Converts value to <code>$T</code>. Returns default value
* when conversion result is <code>null</code>
*/
public $T to$T(Object value, $T defaultValue) {
$T result = ($T) typeConverters[#].convert(value);
if (result == null) {
return defaultValue;
}
return result;
}
/**
* Converts value to <code>$t</code>. Returns default value
* when conversion result is <code>null</code>.
*/
public $t to$PValue(Object value, $t defaultValue) {
$T result = ($T) typeConverters[#++].convert(value);
if (result == null) {
return defaultValue;
}
return result.$tValue();
}
/**
* Converts value to <code>$t</code> with common default value.
*/
public $t to$PValue(Object value) {
return to$PValue(value, $D);
}
'''
for type in types:
# small type
data = template
data = data.replace('#++', str(type[0] + 1))
data = data.replace('#', str(type[0]))
data = data.replace('$T', type[1])
data = data.replace('$t', type[2])
data = data.replace('$P', type[2].title())
data = data.replace('$D', type[3])
java += data
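# For illustration (this comment is not used by the generator): with the first
# entry [0, 'Boolean', 'boolean', 'false'] the template above expands to Java
# along the lines of
#
#   public Boolean toBoolean(Object value) {
#       return (Boolean) typeConverters[0].convert(value);
#   }
#   public boolean toBooleanValue(Object value, boolean defaultValue) {
#       Boolean result = (Boolean) typeConverters[1].convert(value);
#       ...
#   }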
### -----------------------------------------------------------------
types = [
[16, 'boolean[]', 'BooleanArray', 0],
[17, 'int[]', 'IntegerArray', 0],
[18, 'long[]', 'LongArray', 0],
[19, 'float[]', 'FloatArray', 0],
[20, 'double[]', 'DoubleArray', 0],
[21, 'short[]', 'ShortArray', 0],
[22, 'char[]', 'CharacterArray', 0],
[23, 'String', 'String', 1],
[24, 'String[]', 'StringArray', 0],
[25, 'Class', 'Class', 0],
[26, 'Class[]', 'ClassArray', 0],
[27, 'JDateTime', 'JDateTime', 1],
[28, 'Date', 'Date', 1],
[29, 'Calendar', 'Calendar', 1],
[30, 'BigInteger', 'BigInteger', 1],
[31, 'BigDecimal', 'BigDecimal', 1],
]
template = '''
/**
* Converts value to <code>$T</code>.
*/
public $T to$N(Object value) {
return ($T) typeConverters[#].convert(value);
}
'''
template2 = '''
/**
* Converts value to <code>$T</code>. Returns default value
* when conversion result is <code>null</code>
*/
public $T to$N(Object value, $T defaultValue) {
$T result = ($T) typeConverters[#].convert(value);
if (result == null) {
return defaultValue;
}
return result;
}
'''
for type in types:
# small type
data = template
data = data.replace('#', str(type[0]))
data = data.replace('$T', type[1])
data = data.replace('$N', type[2])
java += data
if type[3] == 1:
data = template2
data = data.replace('#', str(type[0]))
data = data.replace('$T', type[1])
data = data.replace('$N', type[2])
java += data
### -----------------------------------------------------------------
java += '}'
f = open('ConvertBean.java', 'w')
f.write(java)
f.close() | vilmospapp/jodd | jodd-core/src/main/python/ConvertBean.py | Python | bsd-2-clause | 4,825 |
#!/usr/bin/env python
#
# :copyright: (c) 2013 by Mike Taylor
# :author: Mike Taylor
# :license: BSD 2-Clause
#
# See LICENSE file for details
#
import os
import time
import json
import argparse
import pyrax
_data_centers = [ 'DFW', 'ORD' ]
_commands     = [ 'list', 'hosts', 'ssh' ]
_config_file = '~/.rackspace.cfg'
_username = 'ops'
_server_info_keys = ( 'accessIPv4', 'status', 'name' )
_marker = '##### auto-generated for rsinfo #####'
_usage = """
list list details for the servers
If a server name is specified, the list will
only contain that server
If a datacenter has been given, the list will only
contain those servers
hosts generate output that can be used in /etc/hosts
ssh generate output that can be used in ~/.ssh/config
Config File Format:
[rackspace_cloud]
username = USERNAME
api_key = KEY
"""
def loadConfig():
parser = argparse.ArgumentParser(epilog=_usage, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-c', '--config', default=_config_file, help='where to retrieve configuration items and the rackspace API keys (default: %(default)s)')
parser.add_argument('-d', '--datacenter', default='ALL', help='datacenter to work within (default: %(default)s)', choices=_data_centers)
parser.add_argument('-s', '--server', help='limit output to the named server')
parser.add_argument('command', choices=['list', 'hosts', 'ssh'])
return parser.parse_args()
def initCredentials(datacenter):
pyrax.set_setting("identity_type", "rackspace")
pyrax.set_credential_file(os.path.expanduser(cfg.config), datacenter)
def loadServers(datacenters):
# {'OS-EXT-STS:task_state': None,
# 'addresses': { u'public': [],
# u'private': []
# },
# 'links': [],
# 'image': { u'id': u'GUID',
# u'links': []
# },
# 'manager': <novaclient.v1_1.servers.ServerManager object at 0x101abb450>,
# 'OS-EXT-STS:vm_state': u'active',
# 'flavor': { u'id': u'2',
# u'links': []
# },
# 'id': u'',
# 'user_id': u'NNN',
# 'OS-DCF:diskConfig': u'AUTO',
# 'accessIPv4': u'',
# 'accessIPv6': u'',
# 'progress': 100,
# 'OS-EXT-STS:power_state': 1,
# 'metadata': {},
# 'status': u'ACTIVE',
# 'updated': u'2013-04-25T05:11:09Z',
# 'hostId': u'',
# 'key_name': None,
# 'name': u'sssss',
# 'created': u'2013-02-11T19:33:31Z',
# 'tenant_id': u'NNN',
# '_info': {},
# 'config_drive': u'',
# '_loaded': True
# }
result = {}
for dc in datacenters:
initCredentials(dc)
print 'searching for servers in', dc
cs = pyrax.cloudservers
for s in cs.servers.list(detailed=True):
if s.name not in result:
result[s.name] = None
result[s.name] = s
print len(result), 'servers processed'
return result
def loadFileWithoutAutoGeneratedItems(filename, marker):
print 'loading', filename
result = []
f = False
    for line in open(os.path.expanduser(filename), 'r').readlines():
if line.startswith(marker):
f = not f
else:
if not f:
result.append(line)
return result
def saveFile(filepath, filename, cleandata, newdata, marker, allowAlt=False):
    fullname = os.path.join(os.path.expanduser(filepath), filename)
print 'saving', fullname
try:
h = open(fullname, 'w+')
h.write(''.join(cleandata))
h.write('\n%s\n' % marker)
h.write(''.join(newdata))
h.write('\n%s\n' % marker)
except IOError:
print 'unable to write to', fullname
if allowAlt:
print 'attempting alternate location'
saveFile('/tmp', filename, cleandata, newdata, marker)
def getServerInfo(serverName, serverList):
result = None
if cfg.datacenter == 'ALL':
s = ' in server list'
else:
s = ' in datacenter %s' % cfg.datacenter
if serverName not in serverList:
        print '%s not found%s' % (serverName, s)
else:
item = serverList[serverName]
result = {}
for key in _server_info_keys:
result[key] = item.__getattr__(key)
return result
_hosts_config = """%(accessIPv4)s\t%(name)s\n"""
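# Illustrative expansion (hypothetical values): {'accessIPv4': '10.0.0.5', 'name': 'web01'}
# renders as the /etc/hosts line "10.0.0.5\tweb01".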
def generateHostsFile(servers):
clean = loadFileWithoutAutoGeneratedItems('/etc/hosts', _marker)
new = []
for s in servers:
r = getServerInfo(s, servers)
new.append(_hosts_config % r)
saveFile('/etc', 'hosts', clean, new, _marker, allowAlt=True)
_ssh_config = """Host %(name)s
User %(username)s
StrictHostKeyChecking no
IdentityFile ~/.ssh/id_rsa
"""
def generateConfigFile(servers):
clean = loadFileWithoutAutoGeneratedItems('~/.ssh/config', _marker)
new = []
for s in servers:
r = getServerInfo(s, servers)
r['username'] = _username
new.append(_ssh_config % r)
saveFile('~/.ssh', 'config', clean, new, _marker, allowAlt=True)
def getCommandParam(cmdText, commands):
try:
p = commands.index(cmdText)
result = commands[p + 1]
except:
result = ''
return result
if __name__ == '__main__':
cfg = loadConfig()
if cfg.datacenter == 'ALL':
datacenters = _data_centers
else:
        datacenters = [ cfg.datacenter ]
servers = loadServers(datacenters)
if cfg.command == 'list':
results = []
if cfg.server is None:
for s in servers:
r = getServerInfo(s, servers)
if r is not None:
results.append(r)
else:
r = getServerInfo(cfg.server, servers)
if r is not None:
results.append(r)
print json.dumps(results)
elif cfg.command == 'hosts':
generateHostsFile(servers)
elif cfg.command == 'ssh':
generateConfigFile(servers)
| bear/rsinfo | rsinfo.py | Python | bsd-2-clause | 6,094 |
# Example: How to prepare a new refund with the Mollie API.
#
import os
from mollie.api.client import Client
from mollie.api.error import Error
def main():
try:
#
# Initialize the Mollie API library with your API key.
#
# See: https://www.mollie.com/dashboard/settings/profiles
#
api_key = os.environ.get("MOLLIE_API_KEY", "test_test")
mollie_client = Client()
mollie_client.set_api_key(api_key)
body = ""
payment_id = ""
body += "<p>Attempting to retrieve the first page of payments and grabbing the first.</p>"
payments = mollie_client.payments.list()
if not len(payments):
body += "<p>You have no payments. You can create one from the examples.</p>"
return body
        payment = next(payments)
        payment_id = payment.id
if (
payment.can_be_refunded()
and payment.amount_remaining["currency"] == "EUR"
and float(payment.amount_remaining["value"]) >= 2.0
):
data = {"amount": {"value": "2.00", "currency": "EUR"}}
refund = mollie_client.payment_refunds.with_parent_id(payment_id).create(data)
body += f'<p>{refund.amount["currency"]} {refund.amount["value"]} of payment {payment_id} refunded</p>'
else:
body += f"<p>Payment {payment_id} can not be refunded</p>"
return body
except Error as err:
return f"API call failed: {err}"
if __name__ == "__main__":
print(main())
| mollie/mollie-api-python | examples/11-refund-payment.py | Python | bsd-2-clause | 1,522 |
# -*- coding: utf-8 -*-
# This code is distributed under the two-clause BSD license.
# Copyright (c) 2012-2013 Raphaël Barrois
from __future__ import absolute_import, unicode_literals
import logging
from django.core import exceptions
from django.db import models
from . import conf
PERM_USER = 'user'
PERM_GROUP_MEMBER = 'grpmember'
PERM_GROUP_ADMIN = 'grpadmin'
PERM_ADMIN = 'admin'
PERM_LEVELS = (
PERM_USER,
PERM_GROUP_MEMBER,
PERM_GROUP_ADMIN,
PERM_ADMIN,
)
logger = logging.getLogger(__name__)
def get_model(model_name):
"""Retrieve a django model.
This handles:
- Explicit models.Model subclass
- Absolute dotted path to import the model
"""
    if isinstance(model_name, type) and issubclass(model_name, models.Model):
return model_name
# Not a Model or a Model instance, must be a class path
if '.' not in model_name:
raise ValueError("Invalid model name %s: should include module name."
% model_name)
app, cls = model_name.rsplit('.', 1)
return models.get_model(app, cls)
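# Illustrative: get_model('auth.User') splits on the last dot into app 'auth' and
# class 'User', then resolves it via models.get_model('auth', 'User').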
class AuthResult(object):
"""Result of an authentication query."""
def __init__(self, data):
self.data = data or {}
self.perms = self._setup_perms(self.data)
def __repr__(self):
return '<AuthResult: [%s / %s]>' % (self.data, self.perms)
def _setup_perms(self, data):
perms = set()
perms.add(PERM_USER)
if 'perms' in data:
perms.add(data['perms'])
if 'grpauth' in data:
if data['grpauth'] == 'admin':
perms.add(PERM_GROUP_ADMIN)
perms.add(PERM_GROUP_MEMBER)
elif data['grpauth'] == 'membre':
perms.add(PERM_GROUP_MEMBER)
return perms
@property
def username(self):
return self.data.get('username', '')
@property
def firstname(self):
return self.data.get('firstname', '')
@property
def lastname(self):
return self.data.get('lastname', '')
@property
def promo(self):
return self.data.get('promo', '')
@property
def email(self):
return self.data.get('email', '')
@property
def is_dead(self):
return bool(self.data.get('deathdate', ''))
@property
def is_admin(self):
return PERM_ADMIN in self.perms
def has_perm(self, perm):
return perm in self.perms
class AuthGroupeXMixin(object):
def __init__(self, config=None, *args, **kwargs):
super(AuthGroupeXMixin, self).__init__(*args, **kwargs)
self.config = config or conf.AuthGroupeXConf()
# Public API
# ==========
def authenticate(self, **kwargs):
"""Create a user if the authgroupex data has been passed.
This data should be present in the 'authgroupex' keyword argument.
"""
if 'authgroupex' in kwargs:
auth_data = kwargs['authgroupex']
else:
logger.info('Trying to authenticate, no authgroupex in data.')
return None
if not auth_data.username:
logger.error('Received a AuthResult object without a username.')
return None
try:
user = self._fetch_user(auth_data.username)
except exceptions.ObjectDoesNotExist:
try:
user = self._create_user_from_auth_data(auth_data)
except ValueError:
logger.warning('Received authgroupex with invalid name %s',
auth_data.username)
return None
self._update_user(user, auth_data)
return user
# Required extension points
# =========================
def get_user(self, user_id):
raise NotImplementedError()
def _fetch_user(self, username):
raise NotImplementedError()
def _create_user(self, username):
raise NotImplementedError()
# Optional extension points
# =========================
def _set_staff(self, user, is_staff):
if hasattr(user, 'is_staff'):
user.is_staff = is_staff
def _set_superuser(self, user, is_superuser):
if hasattr(user, 'is_superuser'):
user.is_superuser = is_superuser
def _set_active(self, user, is_active):
if hasattr(user, 'is_active'):
user.is_active = is_active
def _update_profile(self, user, auth_data):
"""Update fields of the profile according to auth-groupe-x data."""
pass
def _update_groups(self, user, auth_data):
pass
# Internals
# =========
def _update_perms(self, user, auth_data):
# Handle staff status
if self.config.STAFF_PERMS:
self._set_staff(user, any(
auth_data.has_perm(perm) for perm in self.config.STAFF_PERMS))
# Handle superadmins
if self.config.SUPERADMIN_PERMS:
is_superuser = any(
auth_data.has_perm(perm) for perm in self.config.SUPERADMIN_PERMS)
self._set_superuser(user, is_superuser)
if is_superuser:
self._set_staff(user, True)
# Handle active status
if auth_data.is_dead and self.config.DISABLE_DEADS:
self._set_active(user, False)
def _update_user(self, user, auth_data):
"""Update various fields of the user according to auth-groupe-x data."""
self._update_profile(user, auth_data)
self._update_perms(user, auth_data)
self._update_groups(user, auth_data)
# Refresh DB user
user.save()
logger.info('Updated user %s', user.get_username())
def _create_user_from_auth_data(self, auth_data):
"""Create a new Django user from AuthGroupeX data.
This only sets the basic username field;
groups and other data are handled by the update_user method.
"""
username = auth_data.username
user = self._create_user(username)
user.set_unusable_password()
logger.info('Created a new user with username %s', username)
return user
class AuthGroupeXBackend(AuthGroupeXMixin):
"""Authentication backend for auth-groupe-x"""
supports_anonymous_user = False
supports_object_permissions = False
def __init__(self, config=None, *args, **kwargs):
super(AuthGroupeXBackend, self).__init__(config=config, *args, **kwargs)
self.user_model = get_model(self.config.USER_MODEL)
def get_user(self, user_id):
"""Retrieve a user by ID.
Args:
user_id: int, the ID of the user
Returns:
Either an instance of self.config.USER_MODEL or None
"""
try:
return self.user_model.objects.get(pk=user_id)
except self.user_model.DoesNotExist:
return None
def _fetch_user(self, username):
return self.user_model.objects.get(username=username)
def _create_user(self, username):
return self.user_model.objects.create(username=username, is_active=True)
def _update_profile(self, user, auth_data):
"""Update fields of the profile according to auth-groupe-x data."""
# Update basic profile data
if auth_data.firstname:
user.first_name = auth_data.firstname
if auth_data.lastname:
user.last_name = auth_data.lastname
if auth_data.email:
user.email = auth_data.email
if getattr(self.config, 'PROFILE_CLASS', ''):
profile_model = get_model(self.config.PROFILE_CLASS)
try:
profile = user.get_profile()
except profile_model.DoesNotExist:
profile = profile_model.objects.create(user=user)
if auth_data.promo:
profile.promo = auth_data.promo
profile.save()
def _update_groups(self, user, auth_data):
"""Update django groups of the user according to auth-groupe-x data"""
if not self.config.MAP_GROUPS:
return
# Gather names of django groups by mapping perms using MAP_GROUPS
new_group_names = set()
old_group_names = set()
for perm in PERM_LEVELS:
if auth_data.has_perm(perm):
new_group_names |= set(self.config.MAP_GROUPS.get(perm, []))
else:
old_group_names |= set(self.config.MAP_GROUPS.get(perm, []))
# Find django group objects
group_model = get_model(self.config.GROUP_MODEL)
new_groups = list(group_model.objects.filter(name__in=new_group_names))
old_groups = list(group_model.objects.filter(name__in=old_group_names))
if old_groups:
            logger.info(u"Removing user %s from groups %s", user, old_groups)
user.groups.remove(*list(old_groups))
if new_groups:
logger.info(u"Adding user %s to groups %s", user, new_groups)
user.groups.add(*list(new_groups))
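# Illustrative config (hypothetical group names):
#   MAP_GROUPS = {'grpmember': ['members'], 'grpadmin': ['bureau']}
# With this mapping a group admin (who also holds the member perm) is added to both
# 'members' and 'bureau', while a plain member is added to 'members' and removed
# from 'bureau'.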
| Polytechnique-org/django-authgroupex | django_authgroupex/auth.py | Python | bsd-2-clause | 8,966 |
#!/usr/bin/env python
from __future__ import print_function
import json
import logging
from .taxon_concept_node import TaxonConceptSemNode
from .verbatim_name import VerbatimSemNode
from .names_for_ranks import (GenusGroupSemNode,
HigherGroupSemNode,
SpecimenCodeSemNode,
SpeciesGroupSemNode,
TypeSpecimen
)
from .name import CombinationSemNode
from .graph_node import AuthoritySemNode
_LOG = logging.getLogger(__name__)
class SemGraph(object):
att_list = ['_authorities',
'_combinations',
'_genus_group_names',
'_higher_group_names',
'_references',
'_species_group_epithets',
'_specimen_codes',
'_specimens',
'_taxon_concepts',
'_type_specimens',
'_verbatim_name',
]
att_set = frozenset(att_list)
def register_obj(self, id_minting_context, obj):
return _register_new_node(self, id_minting_context, obj)
def __init__(self, taxolotl_config, res):
self.config = taxolotl_config
self.res = res
self._by_id = {}
self._authorities = None
self._specimens = None
self._specimen_codes = None
self._species_group_epithets = None
self._genus_group_names = None
self._combinations = None
self._higher_group_names = None
self._verbatim_name = None
self._taxon_concepts = None
self._references = None
self._type_specimens = None
def impute_type_specimens(self):
for tc in self.taxon_concept_list:
if not tc.is_specimen_based:
continue
if tc.hybrid or tc.undescribed or not tc.rank:
continue
if tc.rank == 'species':
epithet = tc.most_terminal_name
try:
if not epithet.type_materials:
self._add_type_specimen(None, epithet, tc.is_synonym_of)
except:
_LOG.exception("problem adding type materials")
for tc in self.taxon_concept_list:
if tc.hybrid or tc.undescribed or not tc.rank:
continue
if tc.is_specimen_based and tc.rank != 'species':
infra_epithet = tc.most_terminal_name
if infra_epithet is tc.has_name.sp_epithet:
continue
if not infra_epithet.type_materials:
self._add_type_specimen(None, infra_epithet, tc.is_synonym_of)
def denormalize_homonyms(self):
# for species ranK:
# multiple authority entries
# same valid epithet in multiple valid genera
dup_auth_to_mint = {}
for tc in self.taxon_concept_list:
if tc.rank and tc.rank == 'species':
epithet = tc.most_terminal_name
if epithet is None:
if not (tc.hybrid or tc.undescribed):
_LOG.warning('NO Epithet for = {}'.format(tc.__dict__))
continue
if isinstance(epithet._authority, list):
dup_auth_to_mint.setdefault(epithet, []).append(tc)
for name, tc_list in dup_auth_to_mint.items():
verb_name = tc_list[0].has_name
other_name_tc_pairs = []
same_name_tc = []
for tc in tc_list[1:]:
if tc.has_name is verb_name:
same_name_tc.append(tc)
else:
other_name_tc_pairs.append(tc)
for other in other_name_tc_pairs:
self._split_tc_with_shared_sp_epithet(tc_list[0], other)
for other in same_name_tc:
self._split_tc_with_shared_name(tc_list[0], other)
if self.res.id.startswith('cof'):
import sys
# sys.exit(1)
def _split_tc_with_shared_sp_epithet(self, fixed, other):
assert fixed is not other
fix_name, oth_name = fixed.has_name, other.has_name
_LOG.debug('splitting "{}" from ["{}"]'.format(fix_name.name, oth_name.name))
fix_genus, oth_genus = fix_name.genus_name, oth_name.genus_name
fix_sp_epi, oth_sp_epi = fix_name.sp_epithet, oth_name.sp_epithet
assert fix_sp_epi is oth_sp_epi
if fix_sp_epi in oth_genus.contained:
oth_genus.contained.remove(fix_sp_epi)
new_epi = self._add_sp_epithet(other, fix_sp_epi._name, oth_genus, avoid_dup=False)
oth_genus.contained.append(new_epi)
oth_name.sp_epithet = new_epi
vtc = other
if vtc._is_synonym_of:
vtc = vtc._is_synonym_of
for a in fix_sp_epi._authority:
if other in a.taxon_concept_set or vtc in a.taxon_concept_set:
new_epi.claim_authority(a)
break
assert new_epi._authority
fix_sp_epi._authority.remove(new_epi._authority)
if len(fix_sp_epi._authority) == 1:
fix_sp_epi._authority = fix_sp_epi._authority[0]
def _split_tc_with_shared_name(self, fixed, other):
fix_vname = fixed.has_name
assert fix_vname is other.has_name
new_vname = self._add_verbatim_name(other, fix_vname.name, avoid_dup=False)
assert not fix_vname.specimen_codes
for attr in VerbatimSemNode.extra_pred:
v = getattr(fix_vname, attr, None)
if v:
setattr(new_vname, attr, v)
other.has_name = new_vname
self._split_tc_with_shared_sp_epithet(fixed, other)
def postorder_taxon_concepts(self):
yielded = set()
tcl = self.taxon_concept_list
todo = []
for tc in tcl:
if not tc.child_set:
yielded.add(tc)
yield tc
else:
todo.append(tc)
prev_todo_len = 1 + len(todo)
while todo:
assert prev_todo_len > len(todo)
prev_todo_len = len(todo)
ntd = []
for tc in todo:
if tc.child_set.issubset(yielded):
yielded.add(tc)
yield tc
else:
ntd.append(tc)
todo = ntd
@property
def taxon_concept_list(self):
return self._taxon_concepts if self._taxon_concepts else []
def _all_specimen_based_tax_con_dict(self):
r = {}
for tcobj in self.taxon_concept_list:
if tcobj.is_specimen_based:
r[tcobj.canonical_id] = tcobj
return r
def _all_higher_tax_con_dict(self):
r = {}
for tcobj in self.taxon_concept_list:
if not tcobj.is_specimen_based:
r[tcobj.canonical_id] = tcobj
return r
def find_valid_genus(self, genus_name):
r = []
for tc in self._all_higher_tax_con_dict().values():
if tc.rank and tc.rank == 'genus':
if tc.is_valid_for_name(genus_name):
r.append(tc)
return r
def specimen_based_synonym_taxa(self):
d = self._all_specimen_based_tax_con_dict()
r = {}
for tcid, tcobj in d.items():
syn_list = tcobj.synonyms if tcobj.synonyms else []
for syn_id in syn_list:
r[syn_id] = d[syn_id]
return r
@property
def valid_specimen_based_taxa(self):
d = self._all_specimen_based_tax_con_dict()
r = {}
for tcid, tcobj in d.items():
if not tcobj.is_synonym_of:
r[tcid] = tcobj
return r
@property
def valid_taxa_dict(self):
raw = self._taxon_concepts if self._taxon_concepts else []
r = {}
for tcobj in raw:
if not tcobj.is_synonym_of:
r[tcobj.canonical_id] = tcobj
return r
@property
def canonical_name_str_to_taxon_concept_map(self):
return {i.canonical_name.name: i for i in self._taxon_concepts}
@property
def valid_name_to_taxon_concept_map(self):
return {i.valid_name.name: i for i in self.valid_taxa_dict.values()}
def get_by_id(self, can_id, default=None):
return self._by_id.get(can_id, default)
def _add_name(self, container, node_type, parent_sem_node, name, extra_container=None, avoid_dup=True):
search_cont = container if extra_container is None else extra_container.contained
x = None if (not avoid_dup) else _find_by_name(search_cont, name)
if x is None:
d = {'parent_id': parent_sem_node.canonical_id}
if extra_container is not None:
d['class_tag'] = 'epi' # need to figure out if this is the best choice for any extra container obj
x = node_type(self, d, name)
search_cont.append(x)
if search_cont is not container:
container.append(x)
return x
def add_authority(self, tax_con_sem_node, name_sem, authors, year):
auth_list = self.authorities
x = None
for a in auth_list:
if a.authors == authors and a.year == year:
x = a
break
if x is None:
d = {'parent_id': name_sem.canonical_id}
x = AuthoritySemNode(self, d, authors, year, tax_con_sem_node)
else:
x.taxon_concept_set.add(tax_con_sem_node)
auth_list.append(x)
name_sem.claim_authority(x)
return x
def _add_normalized(self, par_sem_node, name_str):
return self._add_name(self.combinations, CombinationSemNode, par_sem_node, name_str)
def _add_combination(self, par_sem_node, name_str):
return self._add_name(self.combinations, CombinationSemNode, par_sem_node, name_str)
def _add_verbatim_name(self, tax_con_sem_node, name_str, avoid_dup=True):
return self._add_name(self.verbatim_name, VerbatimSemNode, tax_con_sem_node, name_str, avoid_dup=avoid_dup)
def _add_genus(self, par_sem_node, name_str):
return self._add_name(self.genus_group_names, GenusGroupSemNode, par_sem_node, name_str)
_add_subgenus = _add_genus
def _add_higher_group_name(self, par_sem_node, name_str):
return self._add_name(self.higher_group_names, HigherGroupSemNode, par_sem_node, name_str)
def _add_sp_epithet(self, par_sem_node, name_str, prev_word_sn, avoid_dup=True):
return self._add_name(self.species_group_epithets, SpeciesGroupSemNode,
par_sem_node, name_str, prev_word_sn, avoid_dup=avoid_dup)
_add_infra_epithet = _add_sp_epithet
def _add_specimen_code(self, par_sem_node, name_str):
return self._add_name(self.specimen_codes, SpecimenCodeSemNode, par_sem_node, name_str)
def add_taxon_concept(self, foreign_id):
x = TaxonConceptSemNode(self, foreign_id)
self.taxon_concepts.append(x)
return x
def remove_taxon_concept(self, tc):
if tc in self._taxon_concepts:
self._taxon_concepts.remove(tc)
if tc.canonical_id in self._by_id:
del self._by_id[tc.canonical_id]
def _add_type_specimen(self, spec_code, epithet_syn_name, valid_taxon):
d = {'parent_id': epithet_syn_name.canonical_id}
x = TypeSpecimen(self, d, spec_code, epithet_syn_name, valid_taxon)
self.type_specimens.append(x)
epithet_syn_name.claim_type_material(x)
return x
def __getattr__(self, item):
hidden = '_{}'.format(item)
if hidden not in SemGraph.att_set:
return self.__getattribute__(item)
v = getattr(self, hidden)
if v is None:
v = []
setattr(self, hidden, v)
return v
def as_dict(self):
d = {}
for hidden in SemGraph.att_list:
v = getattr(self, hidden)
if v is not None:
d[hidden[1:]] = {i.canonical_id: i.as_dict() for i in v}
return d
def _find_by_name(container, name):
if container:
for el in container:
if el._name == name:
return el
return None
_NODE_CLASS_NAME_TO_CT = {AuthoritySemNode: 'auth',
SpecimenCodeSemNode: 'spec_code',
HigherGroupSemNode: 'clade',
SpeciesGroupSemNode: 'sp',
GenusGroupSemNode: 'gen',
VerbatimSemNode: 'verbatim',
CombinationSemNode: 'combin',
}
def _register_new_node(graph, id_minting_context, obj):
"""Returns a canonical_id for a new obj.
The goal is to generate unique IDs that are somewhat human readable to make it
easier to browse the graph.
`id_minting_context`: has
* "parent_id" or "context_id"
* "class_tag" or func will use the class of `obj` to tag classes
"""
assert isinstance(id_minting_context, dict)
pref_str, context_id = id_minting_context.get('parent_id'), ''
if pref_str is None:
pref_str = graph.res.base_resource.id
context_id = id_minting_context['context_id']
ct = id_minting_context.get('class_tag')
if ct is None:
ct = _NODE_CLASS_NAME_TO_CT[obj.__class__]
can_id = _canonicalize(pref_str, ct, context_id)
rci, n = can_id, 1
while True:
wtid = graph._by_id.get(can_id)
if wtid is None:
graph._by_id[can_id] = obj
return can_id
if wtid == obj:
return can_id
n += 1
can_id = '{}:v{}'.format(rci, n)
def _canonicalize(res_id, pred_id, entity_id):
ne = [str(i) for i in (res_id, pred_id, entity_id) if i]
return ':'.join(ne)
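# Illustrative: _canonicalize('cof', 'gen', 37) -> 'cof:gen:37'; falsy components are
# dropped, so _canonicalize('cof', None, 37) -> 'cof:37'.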
| mtholder/taxalotl | taxalotl/sem_graph/graph.py | Python | bsd-2-clause | 13,901 |
import codecs
import six
from builtins import super
from builtins import range
import struct
import time
class InvalidPacketError(Exception):
pass
class BootloaderError(Exception):
pass
class BootloaderTimeoutError(BootloaderError):
pass
# TODO: Implement Security key functionality
class BootloaderKeyError(BootloaderError):
STATUS = 0x01
def __init__(self):
super().__init__("The provided security key was incorrect")
class VerificationError(BootloaderError):
STATUS = 0x02
def __init__(self):
super().__init__("The flash verification failed.")
class IncorrectLength(BootloaderError):
STATUS = 0x03
def __init__(self):
super().__init__("The amount of data available is outside the expected range")
class InvalidData(BootloaderError):
STATUS = 0x04
def __init__(self):
super().__init__("The data is not of the proper form")
class InvalidCommand(BootloaderError):
STATUS = 0x05
def __init__(self):
super().__init__("Command unsupported on target device")
class UnexpectedDevice(BootloaderError):
STATUS = 0x06
class UnsupportedBootloaderVersion(BootloaderError):
STATUS = 0x07
class InvalidChecksum(BootloaderError):
STATUS = 0x08
class InvalidArray(BootloaderError):
STATUS = 0x09
class InvalidFlashRow(BootloaderError):
STATUS = 0x0A
class ProtectedFlash(BootloaderError):
STATUS = 0x0B
class InvalidApp(BootloaderError):
STATUS = 0x0C
class TargetApplicationIsActive(BootloaderError):
STATUS = 0x0D
def __init__(self):
super().__init__("The application is currently marked as active or golden image")
class CallbackResponseInvalid(BootloaderError):
STATUS = 0x0E
class UnknownError(BootloaderError):
STATUS = 0x0F
class BootloaderResponse(object):
FORMAT = ""
ARGS = ()
ERRORS = {klass.STATUS: klass for klass in [
BootloaderKeyError,
VerificationError,
IncorrectLength,
InvalidData,
InvalidCommand,
InvalidChecksum,
UnexpectedDevice,
UnsupportedBootloaderVersion,
InvalidArray,
InvalidFlashRow,
ProtectedFlash,
InvalidApp,
TargetApplicationIsActive,
CallbackResponseInvalid,
UnknownError
]}
def __init__(self, data):
try:
unpacked = struct.unpack(self.FORMAT, data)
except struct.error as e:
raise InvalidPacketError("Cannot unpack packet data '{}': {}".format(data, e))
for arg, value in zip(self.ARGS, unpacked):
if arg:
setattr(self, arg, value)
@classmethod
def decode(cls, data, checksum_func):
start, status, length = struct.unpack("<BBH", data[:4])
if start != 0x01:
raise InvalidPacketError("Expected Start Of Packet signature 0x01, found 0x{0:01X}".format(start))
expected_dlen = len(data) - 7
if length != expected_dlen:
raise InvalidPacketError("Expected packet data length {} actual {}".format(length, expected_dlen))
checksum, end = struct.unpack("<HB", data[-3:])
data = data[:length + 4]
if end != 0x17:
raise InvalidPacketError("Invalid end of packet code 0x{0:02X}, expected 0x17".format(end))
calculated_checksum = checksum_func(data)
if checksum != calculated_checksum:
raise InvalidPacketError(
"Invalid packet checksum 0x{0:02X}, expected 0x{1:02X}".format(checksum, calculated_checksum))
# TODO Handle status 0x0D: The application is currently marked as active
if (status != 0x00):
response_class = cls.ERRORS.get(status)
if response_class:
raise response_class()
else:
raise InvalidPacketError("Unknown status code 0x{0:02X}".format(status))
data = data[4:]
return cls(data)
class BootloaderCommand(object):
COMMAND = None
FORMAT = ""
ARGS = ()
RESPONSE = None
def __init__(self, **kwargs):
for arg in kwargs:
if arg not in self.ARGS:
raise TypeError("Argument {} not in command arguments".format(arg))
self.args = [kwargs[arg] for arg in self.ARGS]
@property
def data(self):
return struct.pack(self.FORMAT, *self.args)
class BooleanResponse(BootloaderResponse):
FORMAT = "B"
ARGS = ("status",)
class EmptyResponse(BootloaderResponse):
pass
class VerifyChecksumCommand(BootloaderCommand):
COMMAND = 0x31
RESPONSE = BooleanResponse
class GetFlashSizeResponse(BootloaderResponse):
FORMAT = "<HH"
ARGS = ("first_row", "last_row")
class GetFlashSizeCommand(BootloaderCommand):
COMMAND = 0x32
FORMAT = "B"
ARGS = ("array_id",)
RESPONSE = GetFlashSizeResponse
class GetAppStatusResponse(BootloaderResponse):
FORMAT = "<BB"
ARGS = ("app_valid", "app_active")
class GetAppStatusCommand(BootloaderCommand):
COMMAND = 0x33
FORMAT = "B"
ARGS = ("application_id",)
RESPONSE = GetAppStatusResponse
class EraseRowCommand(BootloaderCommand):
COMMAND = 0x34
FORMAT = "<BH"
ARGS = ("array_id", "row_id")
RESPONSE = EmptyResponse
class SyncBootloaderCommand(BootloaderCommand):
COMMAND = 0x35
RESPONSE = EmptyResponse
class SetAppActive(BootloaderCommand):
COMMAND = 0x36
FORMAT = "B"
ARGS = ("application_id",)
RESPONSE = EmptyResponse
class SendDataCommand(BootloaderCommand):
COMMAND = 0x37
RESPONSE = EmptyResponse
def __init__(self, data):
self._data = data
super(SendDataCommand, self).__init__()
@property
def data(self):
return self._data
class EnterBootloaderResponse(BootloaderResponse):
FORMAT = "<IBHB"
ARGS = ("silicon_id", "silicon_rev", "bl_version", "bl_version_2")
class EnterBootloaderCommand(BootloaderCommand):
COMMAND = 0x38
RESPONSE = EnterBootloaderResponse
def __init__(self, key):
self._key = key
super(EnterBootloaderCommand, self).__init__()
@property
def data(self):
if self._key is None:
return super(EnterBootloaderCommand, self).data
return super(EnterBootloaderCommand, self).data + struct.pack("<BBBBBB",
*self._key)
class ProgramRowCommand(BootloaderCommand):
COMMAND = 0x39
FORMAT = "<BH"
ARGS = ("array_id", "row_id")
RESPONSE = EmptyResponse
def __init__(self, data, **kwargs):
self._data = data
super(ProgramRowCommand, self).__init__(**kwargs)
@property
def data(self):
return super(ProgramRowCommand, self).data + self._data
class ChecksumResponse(BootloaderResponse):
FORMAT = "<B"
ARGS = ("checksum",)
class VerifyRowCommand(BootloaderCommand):
COMMAND = 0x3A
FORMAT = "<BH"
ARGS = ("array_id", "row_id")
RESPONSE = ChecksumResponse
class ExitBootloaderCommand(BootloaderCommand):
COMMAND = 0x3B
RESPONSE = EmptyResponse
class GetMetadataResponse(BootloaderResponse):
# TODO: metadata format differs in PSOC3 and 4/5
FORMAT = "<BIII7xBBHHH28x"
ARGS = (
"checksum",
"bootloadable_addr",
"bootloader_last_row",
"bootloadable_len",
"active",
"verified",
"app_version",
"app_id",
"custom_id",
)
def __str__(self):
sb = []
for key in self.__dict__:
sb.append("{key}='{value}'".format(key=key, value=self.__dict__[key]))
return ', '.join(sb)
def __repr__(self):
return self.__str__()
class GetPSOC5MetadataResponse(BootloaderResponse):
# TODO: metadata format differs in PSOC3 and 4/5
FORMAT = "<BIHxxIxxxBBHHHI28x"
ARGS = (
"checksum",
"bootloadable_addr",
"bootloader_last_row",
"bootloadable_len",
"active",
"verified",
"bootloader_version",
"app_id",
"app_version",
"app_custom_id",
)
def __str__(self):
sb = []
for key in self.__dict__:
sb.append("{key}='{value}'".format(key=key, value=self.__dict__[key]))
return ', '.join(sb)
def __repr__(self):
return self.__str__()
class GetMetadataCommand(BootloaderCommand):
COMMAND = 0x3C
FORMAT = "<B"
ARGS = ("application_id",)
RESPONSE = GetMetadataResponse
class GetPSOC5MetadataCommand(BootloaderCommand):
COMMAND = 0x3C
FORMAT = "<B"
ARGS = ("application_id",)
RESPONSE = GetPSOC5MetadataResponse
class BootloaderSession(object):
def __init__(self, transport, checksum_func):
self.transport = transport
self.checksum_func = checksum_func
def send(self, command, read=True):
data = command.data
packet = b"\x01" + struct.pack("<BH", command.COMMAND, len(data)) + data
packet = packet + struct.pack('<H', self.checksum_func(packet)) + b"\x17"
self.transport.send(packet)
if read:
response = self.transport.recv()
return command.RESPONSE.decode(response, self.checksum_func)
else:
return None
def enter_bootloader(self, key):
response = self.send(EnterBootloaderCommand(key))
return response.silicon_id, response.silicon_rev, response.bl_version | (response.bl_version_2 << 16)
def application_status(self, application_id):
response = self.send(GetAppStatusCommand(application_id=application_id))
return response.app_valid, response.app_active
def exit_bootloader(self):
self.send(ExitBootloaderCommand(), read=False)
def get_flash_size(self, array_id):
response = self.send(GetFlashSizeCommand(array_id=array_id))
return response.first_row, response.last_row
def verify_checksum(self):
return bool(self.send(VerifyChecksumCommand()).status)
def get_metadata(self, application_id=0):
return self.send(GetMetadataCommand(application_id=application_id))
def get_psoc5_metadata(self, application_id=0):
return self.send(GetPSOC5MetadataCommand(application_id=application_id))
def program_row(self, array_id, row_id, rowdata, chunk_size):
chunked = [rowdata[i:i + chunk_size] for i in range(0, len(rowdata), chunk_size)]
for chunk in chunked[0:-1]:
self.send(SendDataCommand(chunk))
self.send(ProgramRowCommand(chunked[-1], array_id=array_id, row_id=row_id))
def get_row_checksum(self, array_id, row_id):
return self.send(VerifyRowCommand(array_id=array_id, row_id=row_id)).checksum
def set_application_active(self, application_id):
self.send(SetAppActive(application_id=application_id))
class SerialTransport(object):
def __init__(self, f, verbose):
self.f = f
self._verbose = verbose
def send(self, data):
if self._verbose:
for part in bytearray(data):
print("s: 0x{:02x}".format(part))
self.f.write(data)
def recv(self):
data = self.f.read(4)
if len(data) < 4:
raise BootloaderTimeoutError("Timed out waiting for Bootloader response.")
size = struct.unpack("<H", data[-2:])[0]
data += self.f.read(size + 3)
if self._verbose:
for part in bytearray(data):
print("r: 0x{:02x}".format(part))
if len(data) < size + 7:
raise BootloaderTimeoutError("Timed out waiting for Bootloader response.")
return data
class CANbusTransport(object):
MESSAGE_CLASS = None
def __init__(self, transport, frame_id, timeout, echo_frames, wait_send_ms):
self.transport = transport
self.frame_id = frame_id
self.timeout = timeout
self.echo_frames = echo_frames
self.wait_send_s = wait_send_ms / 1000.0
self._last_sent_frame = None
def send(self, data):
start = 0
maxlen = len(data)
while (start < maxlen):
remaining = maxlen - start
if (remaining > 8):
msg = self.MESSAGE_CLASS(
extended_id=False,
arbitration_id=self.frame_id,
data=data[start:start + 8]
)
else:
msg = self.MESSAGE_CLASS(
extended_id=False,
arbitration_id=self.frame_id,
data=data[start:]
)
# Flush input mailbox(es)
while (self.transport.recv(timeout=0)):
pass
self.transport.send(msg)
self._last_sent_frame = msg
if (self.echo_frames):
# Read back the echo message
while (True):
frame = self.transport.recv(self.timeout)
if (not frame):
raise BootloaderTimeoutError("Did not receive echo frame within {} timeout".format(self.timeout))
# Don't check the frame arbitration ID, it may be used for varying purposes
if (frame.data[:frame.dlc] != msg.data[:msg.dlc]):
continue
# Ok, got a good frame
break
elif (self.wait_send_s > 0.0):
time.sleep(self.wait_send_s)
start += 8
def recv(self):
# Response packets read from the Bootloader have the following structure:
# Start of Packet (0x01): 1 byte
# Status Code: 1 byte
# Data Length: 2 bytes
# Data: N bytes of data
# Checksum: 2 bytes
# End of Packet (0x17): 1 byte
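        # Illustrative frame (checksum bytes left symbolic): a zero-length success
        # response arrives as 0x01 0x00 0x00 0x00 <cs_lo> <cs_hi> 0x17.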
data = bytearray()
# Read first frame, contains data length
while True:
frame = self.transport.recv(self.timeout)
if (not frame):
raise BootloaderTimeoutError("Timed out waiting for Bootloader 1st response frame")
            if frame.arbitration_id != self.frame_id:
                # Frames with other arbitration IDs belong to other devices; skip them
                continue
if len(frame.data) < 4:
raise BootloaderTimeoutError("Unexpected response data: length {}, minimum is 4".format(len(frame.data)))
if (frame.data[0] != 0x01):
raise BootloaderTimeoutError("Unexpected start of frame data: 0x{0:02X}, expected 0x01".format(frame.data[0]))
break
data += frame.data[:frame.dlc]
# 4 initial bytes, reported size, 3 tail
total_size = 4 + (struct.unpack("<H", data[2:4])[0]) + 3
while (len(data) < total_size):
frame = self.transport.recv(self.timeout)
if (not frame):
raise BootloaderTimeoutError("Timed out waiting for Bootloader response frame")
if (self.echo_frames) and (frame.arbitration_id != self.frame_id):
# Got a frame from another device, ignore
continue
data += frame.data[:frame.dlc]
return data
def crc16_checksum(data):
crc = 0xffff
for b in data:
if not isinstance(b, int):
b = ord(b)
for i in range(8):
if (crc & 1) ^ (b & 1):
crc = (crc >> 1) ^ 0x8408
else:
crc >>= 1
b >>= 1
crc = (crc << 8) | (crc >> 8)
return ~crc & 0xffff
def sum_2complement_checksum(data):
if (type(data) is str):
return (1 + ~sum([ord(c) for c in data])) & 0xFFFF
elif (type(data) in (bytearray, bytes)):
return (1 + ~sum(data)) & 0xFFFF
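# Illustrative: sum_2complement_checksum(b'\x01\x02') == (1 + ~3) & 0xFFFF == 0xFFFD.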
| arachnidlabs/cyflash | cyflash/protocol.py | Python | bsd-2-clause | 15,768 |
"""
Django settings for d3matt project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
ENV_ROOT = os.path.dirname(os.path.dirname(BASE_DIR))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xz@e41f!kjulkntr!e8f2sahhguv)eqy_04qd5st-g8vvlkx**'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'd3matt.urls'
WSGI_APPLICATION = 'd3matt.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(ENV_ROOT, 'd3matt.db'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(ENV_ROOT, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(ENV_ROOT, 'templates'),
)
CRISPY_TEMPLATE_PACK = 'bootstrap3'
LOGIN_REDIRECT_URL = '/d3matt/'
| d3matt/d3matt.com | src/d3matt/d3matt/settings.py | Python | bsd-2-clause | 2,826 |
"""
Copyright (c) 2013 The Regents of the University of California, AMERICAN INSTITUTES FOR RESEARCH
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Gabe Fierro [email protected] github.com/gtfierro
"""
import re
import os
import sys
import parse
import time
import itertools
import datetime
import logging
import requests
import zipfile
import cStringIO as StringIO
from bs4 import BeautifulSoup as bs
import lib.alchemy as alchemy
sys.path.append('lib')
from config_parser import get_config_options
logfile = "./" + 'xml-parsing.log'
logging.basicConfig(filename=logfile, level=logging.DEBUG)
def get_year_list(yearstring):
"""
Given a [yearstring] of forms
year1
year1-year2
year1,year2,year3
year1-year2,year3-year4
Expands into a list of year integers, and returns
"""
years = []
for subset in yearstring.split(','):
if subset == 'latest':
years.append('latest')
continue
sublist = subset.split('-')
start = int(sublist[0])
end = int(sublist[1])+1 if len(sublist) > 1 else start+1
years.extend(range(start,end))
return years
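# Illustrative: get_year_list('2001-2003,latest') -> [2001, 2002, 2003, 'latest']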
def generate_download_list(years, doctype='grant'):
"""
Given the year string from the configuration file, return
a list of urls to be downloaded
"""
if not years: return []
urls = []
link = 'https://www.google.com/googlebooks/uspto-patents-grants-text.html'
if doctype == 'application':
link = 'https://www.google.com/googlebooks/uspto-patents-applications-text.html'
url = requests.get(link)
soup = bs(url.content)
years = get_year_list(years)
# latest file link
if 'latest' in years:
a = soup.h3.findNext('h3').findPrevious('a')
urls.append(a['href'])
years.remove('latest')
# get year links
for year in years:
header = soup.find('h3', {'id': str(year)})
a = header.findNext()
while a.name != 'h3':
urls.append(a['href'])
a = a.findNext()
return urls
def download_files(urls):
"""
    Downloads each url and unzips it into the module-level [downloaddir] directory,
    skipping archives whose extracted file is already present.
Returns: False if files were not downloaded or if there was some error,
True otherwise
"""
import os
import requests
import zipfile
import cStringIO as StringIO
if not (downloaddir and urls): return False
complete = True
print 'downloading to',downloaddir
for url in urls:
filename = url.split('/')[-1].replace('zip','xml')
if filename in os.listdir(downloaddir):
print 'already have',filename
continue
print 'downloading',url
try:
r = requests.get(url)
z = zipfile.ZipFile(StringIO.StringIO(r.content))
print 'unzipping',filename
z.extractall(downloaddir)
except:
print 'ERROR: downloading or unzipping',filename
complete = False
continue
return complete
def run_parse(files, doctype='grant'):
import parse
import time
import sys
import itertools
import lib.alchemy as alchemy
import logging
logfile = "./" + 'xml-parsing.log'
logging.basicConfig(filename=logfile, level=logging.DEBUG)
parse.parse_files(files, doctype)
def run_clean(process_config):
if not process_config['clean']:
return
doctype = process_config['doctype']
command = 'run_clean.bat'
os.system(command)
def run_consolidate(process_config):
if not process_config['consolidate']:
return
doctype = process_config['doctype']
# TODO: optionally include previous disambiguation
command = 'run_consolidation.bat'
os.system(command)
if __name__=='__main__':
s = datetime.datetime.now()
# accepts path to configuration file as command line option
if len(sys.argv) < 2:
print('Please specify a configuration file as the first argument')
exit()
process_config, parse_config = get_config_options(sys.argv[1])
doctype = process_config['doctype']
# download the files to be parsed
urls = []
should_process_grants = doctype in ['all', 'grant']
should_process_applications = doctype in ['all', 'application']
if should_process_grants:
urls += generate_download_list(parse_config['years'], 'grant')
if should_process_applications:
urls += generate_download_list(parse_config['years'], 'application')
downloaddir = parse_config['downloaddir']
if downloaddir and not os.path.exists(downloaddir):
os.makedirs(downloaddir)
print 'Downloading files at {0}'.format(str(datetime.datetime.today()))
download_files(urls)
print 'Downloaded files:',parse_config['years']
f = datetime.datetime.now()
print 'Finished downloading in {0}'.format(str(f-s))
# find files
print "Starting parse on {0} on directory {1}".format(str(datetime.datetime.today()),parse_config['datadir'])
if should_process_grants:
files = parse.list_files(parse_config['datadir'],parse_config['grantregex'])
print 'Running grant parse...'
run_parse(files, 'grant')
f = datetime.datetime.now()
print "Found {2} files matching {0} in directory {1}"\
.format(parse_config['grantregex'], parse_config['datadir'], len(files))
if should_process_applications:
files = parse.list_files(parse_config['datadir'],parse_config['applicationregex'])
print 'Running application parse...'
run_parse(files, 'application')
f = datetime.datetime.now()
print "Found {2} files matching {0} in directory {1}"\
.format(parse_config['applicationregex'], parse_config['datadir'], len(files))
print 'Finished parsing in {0}'.format(str(f-s))
# run extra phases if needed
run_clean(process_config)
run_consolidate(process_config)
| CSSIP-AIR/PatentsProcessor | start.py | Python | bsd-2-clause | 7,205 |
import json
import re
from kqml import KQMLString
from .kqml_list import KQMLList
from .kqml_token import KQMLToken
from .kqml_exceptions import KQMLException
class CLJsonConverter(object):
def __init__(self, token_bools=False):
self.token_bools = token_bools
def cl_from_json(self, json_obj):
"""Read a json into a KQMLList, recursively using the CLJson paradigm.
Note: both false an None are mapped to NIL. This means parsing back will
not have exactly the same result as the original json dict/list, in some
cases.
"""
if isinstance(json_obj, str):
json_obj = json.loads(json_obj)
elif isinstance(json_obj, bytes):
json_obj = json.loads(json_obj.decode('utf-8'))
elif not isinstance(json_obj, list) and not isinstance(json_obj, dict):
raise ValueError("Input must be list, dict, or string/bytes "
"json, got %s." % type(json_obj))
return self._cl_from_json(json_obj)
def _cl_from_json(self, json_obj):
if isinstance(json_obj, list):
ret = KQMLList()
for elem in json_obj:
ret.append(self._cl_from_json(elem))
elif isinstance(json_obj, dict):
ret = KQMLList()
for key, val in json_obj.items():
ret.set(_key_from_string(key), self._cl_from_json(val))
elif isinstance(json_obj, str):
ret = KQMLString(json_obj)
elif isinstance(json_obj, bool):
if json_obj:
if self.token_bools:
ret = KQMLToken('TRUE')
else:
ret = KQMLToken('T')
else:
if self.token_bools:
ret = KQMLToken('FALSE')
else:
ret = KQMLToken('NIL')
elif isinstance(json_obj, int) or isinstance(json_obj, float):
ret = str(json_obj)
elif json_obj is None:
return KQMLToken('NIL')
else:
raise KQMLException("Unexpected value %s of type %s."
% (json_obj, type(json_obj)))
return ret
def cl_to_json(self, kqml_list):
"""Recursively convert a KQMLList into a json-style dict/list.
Note: Because NIL is used as both None and False in lisp, all NIL is
returned as None, even if the value was intended, or originally, False.
"""
if not isinstance(kqml_list, KQMLList):
raise ValueError("Only a KQMLList might be converted into json, "
"got %s." % type(kqml_list))
return self._cl_to_json(kqml_list)
def _cl_to_json(self, kqml_thing):
if isinstance(kqml_thing, KQMLList):
# Find all possible keys (things that start with ":")
possible_keys = re.findall(':([^\s]+)', kqml_thing.to_string())
# Determine the true keys by checking we can actually get something
# from them.
true_key_values = {}
for k in possible_keys:
val = kqml_thing.get(k)
if val is not None:
true_key_values[k] = val
# Extract the return value.
if len(true_key_values) == len(kqml_thing)/2:
# It's a dict!
ret = {}
for key, val in true_key_values.items():
ret[_string_from_key(key)] = self._cl_to_json(val)
elif not len(true_key_values):
# It's a list!
ret = []
for item in kqml_thing:
ret.append(self._cl_to_json(item))
else:
# It's not valid for json.
raise KQMLException("Cannot convert %s into json, neither "
"list nor dict." % kqml_thing.to_string())
elif isinstance(kqml_thing, KQMLToken):
s = kqml_thing.to_string()
if s == 'NIL':
# This could be either false or None. Because None will almost
# always have the same meaning as False in pep-8 compliant
# python, but not vice-versa, we choose None. To avoid this,
# you can set `token_bools` to True on your converter class,
# and True will be mapped to the token TRUE and False to FALSE.
ret = None
elif (not self.token_bools and s == 'T') \
or (self.token_bools and s == 'TRUE'):
ret = True
elif self.token_bools and s == 'FALSE':
ret = False
elif s.isdigit():
ret = int(s)
elif s.count('.') == 1 and all(seg.isdigit()
for seg in s.split('.')):
ret = float(s)
else:
ret = s
elif isinstance(kqml_thing, KQMLString):
ret = kqml_thing.string_value()
else:
raise KQMLException("Unexpected value %s of type %s."
% (kqml_thing, type(kqml_thing)))
return ret
JSON_TO_CL_PATTS = [
# Add a * before upper case at the beginning of lines or after an
# underscore.
([re.compile('^([A-Z][a-z])'), re.compile('_([A-Z][a-z])')],
lambda m: m.group(0).replace(m.group(1), '') + '*'
+ m.group(1).lower()),
# Replace Upper case not at the beginning or after an underscore.
([re.compile('([A-Z][a-z])')], lambda m: '-' + m.group(1).lower()),
# Replace groups of upper case words with surrounding pluses.
([re.compile('([A-Z0-9]+[A-Z0-9_]*[A-Z0-9]*)')],
lambda m: '+%s+' % m.group(1).lower().replace('_', '-')),
# Convert some other special underscores to --
([re.compile('([a-z])_([a-z0-9\+*])')], '\\1--\\2'),
]
def _key_from_string(key):
for patts, repl in JSON_TO_CL_PATTS:
for patt in patts:
try:
new_key = patt.sub(repl, key)
except Exception:
                print("Exception in key_from_string:", patt.pattern, repl, key)
raise
key = new_key
return key.upper()
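# Illustrative: _key_from_string('someKey') -> 'SOME-KEY'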
CL_TO_JSON_PATTS = [
    # Replace -- with _
(re.compile('(--)'), '_'),
# Replace *a with A
(re.compile('\*([a-z])'), lambda m: m.group(1).upper()),
# Replace +abc-d+ with ABC_D
(re.compile('\+([a-z0-9-]+)\+'),
lambda m: m.group(1).upper().replace('-', '_')),
# Replace foo-bar with fooBar
(re.compile('-([a-z])'), lambda m: m.group(1).upper())
]
def _string_from_key(s):
s = s.lower()
for patt, repl in CL_TO_JSON_PATTS:
new_s = patt.sub(repl, s)
s = new_s
return s
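# Illustrative inverse: _string_from_key('SOME-KEY') -> 'someKey'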
| bgyori/pykqml | kqml/cl_json.py | Python | bsd-2-clause | 6,768 |
import csv
import os
from json import loads
from os.path import exists
from old.project import CassandraInsert
from old.project import CassandraUtils
cassandra = CassandraUtils()
QTD_STS_KEY = 'quotedStatus'
RTD_STS_KEY = 'retweetedStatus'
MT_STS_KEY = 'userMentionEntities'
def check_mention_entities(tweet):
return RTD_STS_KEY not in tweet and MT_STS_KEY in tweet and len(tweet[MT_STS_KEY]) > 0
def remove_retweets(tts_rows):
tts = map(lambda tt_row: loads(tt_row.tweet), tts_rows)
return filter(lambda tt: QTD_STS_KEY not in tt and RTD_STS_KEY not in tt, tts)
def find_retweets(tts_rows):
tts = map(lambda tt_row: loads(tt_row.tweet), tts_rows)
rts = []
rts.extend(map(lambda tt: tt[RTD_STS_KEY], filter(lambda tt: RTD_STS_KEY in tt, tts)))
rts.extend(map(lambda tt: tt[QTD_STS_KEY], filter(lambda tt: QTD_STS_KEY in tt, tts)))
return rts
def find_mentions(tts_rows):
tts = map(lambda tt_row: loads(tt_row.tweet), tts_rows)
return filter(lambda tt: check_mention_entities(tt), tts)
def mount_path(dirfile, user_id):
return '/home/joao/Dev/Data/Twitter/' + dirfile + str(user_id) + '.csv'
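# Illustrative: mount_path('friends/', 42) -> '/home/joao/Dev/Data/Twitter/friends/42.csv'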
def check_file(path):
return exists(path)
def save_friends(user_id, friends_rows):
path = mount_path('friends/', user_id)
with open(path, 'w') as writer:
friends_ids = map(lambda friend: friend.friend_id, friends_rows)
for friend_id in friends_ids:
writer.write(str(friend_id) + '\n')
writer.flush()
writer.close()
def delete_file(path):
if os.stat(path).st_size == 0:
os.remove(path)
def friends2file():
print "Saving Friends..."
seeds = cassandra.find_seeds()
c = 0
for seeds_row in seeds:
user_id = seeds_row.user_id
friends_rows = cassandra.find_friends(user_id=user_id)
ci = CassandraInsert()
for friend_row in friends_rows:
ci.insert_friendship({'user_id': user_id, 'friend_id': friend_row.friend_id})
c = c + 1
print 'Users: ', c
print "Friends Saved..."
def save_likes(user_id, likes_rows):
lks_tts = map(lambda row: loads(row.tweet), likes_rows)
with open(mount_path('likes/', user_id), 'w') as csvfile:
fieldnames = ['alter_id', 'tweet_id']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for tt in lks_tts:
writer.writerow({
'alter_id': tt['user']['id'],
'tweet_id': tt['id']
})
def likes2file():
print "Saving likes..."
seeds = cassandra.find_seeds()
c = 0
for seeds_row in seeds:
user_id = seeds_row.user_id
ci = CassandraInsert()
likes = map(lambda lk: loads(lk.tweet), cassandra.find_likes(user_id=user_id))
for like in likes:
lk = {
'user_id': user_id,
'tweet_id': like['id'],
'alter_id': like['user']['id']}
ci.insert_lk_interaction(lk)
tt = {
'tweet_id': like['id'],
'date': like['createdAt'],
'lang': like['lang'],
'text': like['text'],
'user_id': like['user']['id']}
ci.insert_tweet(tt)
usr = {
'id': like['user']['id'],
'flw_count': like['user']['followersCount'],
'frd_count': like['user']['friendsCount'],
'is_protected': like['user']['isProtected'],
'is_verified': like['user']['isVerified'],
'lk_count': like['user']['favouritesCount'],
'lang': like['user']['lang'],
'tt_count': like['user']['statusesCount']}
ci.insert_user(usr)
c = c + 1
print 'Users: ', c
print "Likes saved"
def save_retweets(ci, user_id, tweets_rows):
retweets = find_retweets(tweets_rows)
for tt in retweets:
rt = {
'user_id': user_id,
'tweet_id': tt['id'],
'alter_id': tt['user']['id']}
ci.insert_rt_interaction(rt)
_tt = {
'tweet_id': tt['id'],
'date': tt['createdAt'],
'lang': tt['lang'],
'text': tt['text'],
'user_id': tt['user']['id']}
ci.insert_tweet(_tt)
usr = {
'id': tt['user']['id'],
'flw_count': tt['user']['followersCount'],
'frd_count': tt['user']['friendsCount'],
'is_protected': tt['user']['isProtected'],
'is_verified': tt['user']['isVerified'],
'lk_count': tt['user']['favouritesCount'],
'lang': tt['user']['lang'],
'tt_count': tt['user']['statusesCount']}
ci.insert_user(usr)
def save_mentions(ci, user_id, tweets_rows):
tweets = find_mentions(tweets_rows)
for tt in tweets:
for me in tt[MT_STS_KEY]:
me = {
'user_id': user_id,
'tweet_id': tt['id'],
'alter_id': me['id']}
ci.insert_mt_interaction(me)
_tt = {
'tweet_id': tt['id'],
'date': tt['createdAt'],
'lang': tt['lang'],
'text': tt['text'],
'user_id': tt['user']['id']}
ci.insert_tweet(_tt)
usr = {
'id': tt['user']['id'],
'flw_count': tt['user']['followersCount'],
'frd_count': tt['user']['friendsCount'],
'is_protected': tt['user']['isProtected'],
'is_verified': tt['user']['isVerified'],
'lk_count': tt['user']['favouritesCount'],
'lang': tt['user']['lang'],
'tt_count': tt['user']['statusesCount']}
ci.insert_user(usr)
def save_tweets(ci, user_id, tweets_rows):
save_retweets(ci, user_id, tweets_rows)
save_mentions(ci, user_id, tweets_rows)
tweets = remove_retweets(tweets_rows)
for tt in tweets:
_tt = {
'tweet_id': tt['id'],
'date': tt['createdAt'],
'lang': tt['lang'],
'text': tt['text'],
'user_id': tt['user']['id']}
ci.insert_tweet(_tt)
usr = {
'id': tt['user']['id'],
'flw_count': tt['user']['followersCount'],
'frd_count': tt['user']['friendsCount'],
'is_protected': tt['user']['isProtected'],
'is_verified': tt['user']['isVerified'],
'lk_count': tt['user']['favouritesCount'],
'lang': tt['user']['lang'],
'tt_count': tt['user']['statusesCount']}
ci.insert_user(usr)
def tweets2file():
print "Saving Tweets..."
seeds = cassandra.find_seeds()
ci = CassandraInsert()
c = 0
for seeds_row in seeds:
user_id = seeds_row.user_id
tweets_rows = cassandra.find_tweets(user_id=user_id)
save_tweets(ci=ci, user_id=user_id, tweets_rows=tweets_rows)
c = c + 1
print 'Users: ', c
print "Tweets saved"
| jblupus/PyLoyaltyProject | old/project/integration/to_files2.py | Python | bsd-2-clause | 7,009 |
"""
Environment variable configuration loading class.
Using a class here doesn't really model anything but makes state passing (in a
situation requiring it) more convenient.
This module is currently considered private/an implementation detail and should
not be included in the Sphinx API documentation.
"""
import os
from .util import six
from .exceptions import UncastableEnvVar, AmbiguousEnvVar
from .util import debug
class Environment(object):
def __init__(self, config, prefix):
self._config = config
self._prefix = prefix
self.data = {} # Accumulator
def load(self):
"""
Return a nested dict containing values from `os.environ`.
Specifically, values whose keys map to already-known configuration
settings, allowing us to perform basic typecasting.
See :ref:`env-vars` for details.
"""
# Obtain allowed env var -> existing value map
env_vars = self._crawl(key_path=[], env_vars={})
m = "Scanning for env vars according to prefix: {!r}, mapping: {!r}"
debug(m.format(self._prefix, env_vars))
# Check for actual env var (honoring prefix) and try to set
for env_var, key_path in six.iteritems(env_vars):
real_var = (self._prefix or "") + env_var
if real_var in os.environ:
self._path_set(key_path, os.environ[real_var])
debug("Obtained env var config: {!r}".format(self.data))
return self.data
def _crawl(self, key_path, env_vars):
"""
Examine config at location ``key_path`` & return potential env vars.
Uses ``env_vars`` dict to determine if a conflict exists, and raises an
exception if so. This dict is of the following form::
{
'EXPECTED_ENV_VAR_HERE': ['actual', 'nested', 'key_path'],
...
}
Returns another dictionary of new keypairs as per above.
"""
new_vars = {}
obj = self._path_get(key_path)
# Sub-dict -> recurse
if (
hasattr(obj, 'keys')
and callable(obj.keys)
and hasattr(obj, '__getitem__')
):
for key in obj.keys():
merged_vars = dict(env_vars, **new_vars)
merged_path = key_path + [key]
crawled = self._crawl(merged_path, merged_vars)
# Handle conflicts
for key in crawled:
if key in new_vars:
err = "Found >1 source for {}"
raise AmbiguousEnvVar(err.format(key))
# Merge and continue
new_vars.update(crawled)
# Other -> is leaf, no recursion
else:
new_vars[self._to_env_var(key_path)] = key_path
return new_vars
def _to_env_var(self, key_path):
return '_'.join(key_path).upper()
def _path_get(self, key_path):
# Gets are from self._config because that's what determines valid env
# vars and/or values for typecasting.
obj = self._config
for key in key_path:
obj = obj[key]
return obj
def _path_set(self, key_path, value):
# Sets are to self.data since that's what we are presenting to the
# outer config object and debugging.
obj = self.data
for key in key_path[:-1]:
if key not in obj:
obj[key] = {}
obj = obj[key]
old = self._path_get(key_path)
new_ = self._cast(old, value)
obj[key_path[-1]] = new_
def _cast(self, old, new_):
if isinstance(old, bool):
return new_ not in ('0', '')
elif isinstance(old, six.string_types):
return new_
elif old is None:
return new_
elif isinstance(old, (list, tuple)):
err = "Can't adapt an environment string into a {}!"
err = err.format(type(old))
raise UncastableEnvVar(err)
else:
return old.__class__(new_)
| mkusz/invoke | invoke/env.py | Python | bsd-2-clause | 4,077 |
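# Editor's usage sketch for the Environment loader above (invoke/env.py).
# Not part of the original module; the "INVOKE_" prefix, the config dict and
# the import path below are assumptions chosen purely for illustration.
import os

from invoke.env import Environment

# Pretend the user exported INVOKE_RUN_ECHO=1 in their shell.
os.environ["INVOKE_RUN_ECHO"] = "1"

# The loader only considers env vars whose names map onto known config keys,
# and casts the string value back to the type of the existing setting.
loader = Environment(config={"run": {"echo": False, "pty": False}}, prefix="INVOKE_")
print(loader.load())  # -> {'run': {'echo': True}}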
#
# Polymorphic Mixins
#
from silverflask import db
from sqlalchemy.ext.declarative import declared_attr
from silverflask.helper import classproperty
class PolymorphicMixin(object):
type = db.Column(db.String(50))
@declared_attr
def __mapper_args__(cls):
if hasattr(cls, '__versioned_draft_class__'):
# Use same identities as draft class
ident = cls.__versioned_draft_class__.__mapper_args__["polymorphic_identity"]
else:
ident = cls.__tablename__
d = {
'polymorphic_identity': ident,
}
# this is a base object, therefore we are not
# redefining the column on which it is polymorphic
if hasattr(cls.__table__.columns, 'id') and not cls.__table__.columns.id.foreign_keys:
d['polymorphic_on'] = 'type'
return d
| wolfv/SilverFlask | silverflask/mixins/PolymorphicMixin.py | Python | bsd-2-clause | 850 |
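# Editor's sketch of how the mixin above might be attached to a declarative
# base model. The SiteObject model is hypothetical (it is not part of
# SilverFlask) and the import path is assumed from the file location; the
# snippet only illustrates that the mixin's 'type' column becomes the
# polymorphic discriminator when the id column carries no foreign key.
from silverflask import db
from silverflask.mixins.PolymorphicMixin import PolymorphicMixin


class SiteObject(db.Model, PolymorphicMixin):
    __tablename__ = 'site_object'
    id = db.Column(db.Integer, primary_key=True)
    # Subclasses that define their own table with a ForeignKey id (joined-table
    # inheritance) fall into the mixin's other branch: they inherit 'type' as
    # the discriminator and get polymorphic_identity = their own __tablename__.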
from speakeasy import app
app.run(debug=True)
| TinnedTuna/speakeasyspeeches | wsgi/run.py | Python | bsd-2-clause | 46 |
"""
OpenVZ containers
=================
"""
from contextlib import contextmanager
import hashlib
import os
import posixpath
import tempfile
from fabric.api import (
env,
hide,
output,
settings,
sudo,
)
from fabric.operations import (
_AttributeString,
_execute,
_prefix_commands,
_prefix_env_vars,
_shell_wrap,
_sudo_prefix,
)
from fabric.state import default_channel
from fabric.utils import error
import fabric.operations
import fabric.sftp
from fabric.context_managers import (
quiet as quiet_manager,
warn_only as warn_only_manager,
)
@contextmanager
def guest(name_or_ctid):
"""
Context manager to run commands inside a guest container.
Supported basic operations are: `run`_, `sudo`_ and `put`_.
.. warning:: commands executed with ``run()`` will be run as
**root** inside the container.
Use ``sudo(command, user='foo')`` to run them as
an unpriviledged user.
Example::
from fabtools.openvz import guest
with guest('foo'):
run('hostname')
sudo('whoami', user='alice')
put('files/hello.txt')
.. _run: http://docs.fabfile.org/en/1.4.3/api/core/operations.html#fabric.operations.run
.. _sudo: http://docs.fabfile.org/en/1.4.3/api/core/operations.html#fabric.operations.sudo
.. _put: http://docs.fabfile.org/en/1.4.3/api/core/operations.html#fabric.operations.put
"""
# Monkey patch fabric operations
_orig_run_command = fabric.operations._run_command
_orig_put = fabric.sftp.SFTP.put
def run_guest_command(command, shell=True, pty=True, combine_stderr=True,
sudo=False, user=None, quiet=False, warn_only=False, stdout=None,
stderr=None, group=None, timeout=None):
"""
Run command inside a guest container
"""
# Use a non-login shell
_orig_shell = env.shell
env.shell = '/bin/bash -c'
# Use double quotes for the sudo prompt
_orig_sudo_prefix = env.sudo_prefix
env.sudo_prefix = 'sudo -S -p "%(sudo_prompt)s" '
# Try to cd to the user's home directory for consistency,
# as the default directory is "/" with "vzctl exec2"
if not env.cwd:
env.command_prefixes.insert(0, 'cd 2>/dev/null || true')
# Build the guest command
guest_command = _shell_wrap_inner(
_prefix_commands(_prefix_env_vars(command), 'remote'),
True,
_sudo_prefix(user) if sudo and user else None
)
host_command = "vzctl exec2 %s '%s'" % (name_or_ctid, guest_command)
# Restore env
env.shell = _orig_shell
env.sudo_prefix = _orig_sudo_prefix
if not env.cwd:
env.command_prefixes.pop(0)
# Run host command as root
return _run_host_command(host_command, shell=shell, pty=pty,
combine_stderr=combine_stderr)
def put_guest(self, local_path, remote_path, use_sudo, mirror_local_mode,
mode, local_is_path):
"""
Upload file to a guest container
"""
pre = self.ftp.getcwd()
pre = pre if pre else ''
if local_is_path and self.isdir(remote_path):
basename = os.path.basename(local_path)
remote_path = posixpath.join(remote_path, basename)
if output.running:
print(("[%s] put: %s -> %s" % (
env.host_string,
local_path if local_is_path else '<file obj>',
posixpath.join(pre, remote_path)
)))
# Have to bounce off FS if doing file-like objects
fd, real_local_path = None, local_path
if not local_is_path:
fd, real_local_path = tempfile.mkstemp()
old_pointer = local_path.tell()
local_path.seek(0)
file_obj = os.fdopen(fd, 'wb')
file_obj.write(local_path.read())
file_obj.close()
local_path.seek(old_pointer)
# Use temporary file with a unique name on the host machine
guest_path = remote_path
hasher = hashlib.sha1()
hasher.update(env.host_string)
        hasher.update(str(name_or_ctid))  # ctid may be an int; hashlib needs a string
hasher.update(guest_path)
host_path = hasher.hexdigest()
# Upload the file to host machine
rattrs = self.ftp.put(real_local_path, host_path)
# Copy file to the guest container
with settings(hide('everything'), cwd=""):
cmd = "cat \"%s\" | vzctl exec \"%s\" 'cat - > \"%s\"'" \
% (host_path, name_or_ctid, guest_path)
_orig_run_command(cmd, sudo=True)
# Revert to original remote_path for return value's sake
remote_path = guest_path
# Clean up
if not local_is_path:
os.remove(real_local_path)
# Handle modes if necessary
if (local_is_path and mirror_local_mode) or (mode is not None):
lmode = os.stat(local_path).st_mode if mirror_local_mode else mode
lmode = lmode & 0o7777
rmode = rattrs.st_mode & 0o7777
if lmode != rmode:
with hide('everything'):
sudo('chmod %o \"%s\"' % (lmode, remote_path))
return remote_path
fabric.operations._run_command = run_guest_command
fabric.sftp.SFTP.put = put_guest
yield
# Monkey unpatch
fabric.operations._run_command = _orig_run_command
fabric.sftp.SFTP.put = _orig_put
@contextmanager
def _noop():
yield
def _run_host_command(command, shell=True, pty=True, combine_stderr=True,
quiet=False, warn_only=False, stdout=None, stderr=None, timeout=None):
"""
Run host wrapper command as root
(Modified from fabric.operations._run_command to ignore prefixes,
path(), cd(), and always use sudo.)
"""
manager = _noop
if warn_only:
manager = warn_only_manager
# Quiet's behavior is a superset of warn_only's, so it wins.
if quiet:
manager = quiet_manager
with manager():
# Set up new var so original argument can be displayed verbatim later.
given_command = command
# Handle context manager modifications, and shell wrapping
wrapped_command = _shell_wrap(
command, # !! removed _prefix_commands() & _prefix_env_vars()
shell,
_sudo_prefix(None) # !! always use sudo
)
# Execute info line
which = 'sudo' # !! always use sudo
if output.debug:
print(("[%s] %s: %s" % (env.host_string, which, wrapped_command)))
elif output.running:
print(("[%s] %s: %s" % (env.host_string, which, given_command)))
# Actual execution, stdin/stdout/stderr handling, and termination
result_stdout, result_stderr, status = _execute(
channel=default_channel(), command=wrapped_command, pty=pty,
combine_stderr=combine_stderr, invoke_shell=False, stdout=stdout,
stderr=stderr, timeout=timeout)
# Assemble output string
out = _AttributeString(result_stdout)
err = _AttributeString(result_stderr)
# Error handling
out.failed = False
out.command = given_command
out.real_command = wrapped_command
if status not in env.ok_ret_codes:
out.failed = True
msg = "%s() received nonzero return code %s while executing" % (
which, status
)
if env.warn_only:
msg += " '%s'!" % given_command
else:
msg += "!\n\nRequested: %s\nExecuted: %s" % (
given_command, wrapped_command
)
error(message=msg, stdout=out, stderr=err)
# Attach return code to output string so users who have set things to
# warn only, can inspect the error code.
out.return_code = status
# Convenience mirror of .failed
out.succeeded = not out.failed
# Attach stderr for anyone interested in that.
out.stderr = err
return out
def _shell_wrap_inner(command, shell=True, sudo_prefix=None):
"""
Conditionally wrap given command in env.shell (while honoring sudo.)
(Modified from fabric.operations._shell_wrap to avoid double escaping,
as the wrapping host command would also get shell escaped.)
"""
# Honor env.shell, while allowing the 'shell' kwarg to override it (at
# least in terms of turning it off.)
if shell and not env.use_shell:
shell = False
# Sudo plus space, or empty string
if sudo_prefix is None:
sudo_prefix = ""
else:
sudo_prefix += " "
# If we're shell wrapping, prefix shell and space, escape the command and
# then quote it. Otherwise, empty string.
if shell:
shell = env.shell + " "
command = '"%s"' % command # !! removed _shell_escape() here
else:
shell = ""
# Resulting string should now have correct formatting
return sudo_prefix + shell + command
| bitmonk/fabtools | fabtools/openvz/contextmanager.py | Python | bsd-2-clause | 9,146 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-29 07:17
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crimeprediction', '0003_auto_20160406_1610'),
]
operations = [
migrations.RemoveField(
model_name='crimesample',
name='crime',
),
migrations.RemoveField(
model_name='crimesample',
name='grids',
),
migrations.DeleteModel(
name='CrimeSample',
),
]
| jayArnel/crimemapping | crimeprediction/migrations/0004_auto_20160429_0717.py | Python | bsd-2-clause | 586 |
import unittest
import tempfile
import shutil
import os
from simlammps.io.lammps_data_file_parser import LammpsDataFileParser
from simlammps.io.lammps_simple_data_handler import LammpsSimpleDataHandler
class TestLammpsSimpleDataHandler(unittest.TestCase):
""" Tests the data reader class
"""
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.handler = LammpsSimpleDataHandler()
self.parser = LammpsDataFileParser(handler=self.handler)
self.filename = os.path.join(self.temp_dir, "test_data.txt")
_write_example_file(self.filename, _data_file_contents)
    def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_number_atom_types(self):
self.parser.parse(self.filename)
self.assertEqual(3, self.handler.get_number_atom_types())
def test_masses(self):
self.parser.parse(self.filename)
masses = self.handler.get_masses()
self.assertEqual(len(masses), self.handler.get_number_atom_types())
self.assertEqual(masses[1], 3)
self.assertEqual(masses[2], 42)
self.assertEqual(masses[3], 1)
def test_atoms(self):
self.parser.parse(self.filename)
atoms = self.handler.get_atoms()
for i in range(1, 4):
self.assertTrue(i in atoms)
self.assertEqual(atoms[i][1:4], [i * 1.0, i * 1.0, i * 1.0])
def test_velocities(self):
self.parser.parse(self.filename)
velocities = self.handler.get_velocities()
for i in range(1, 4):
self.assertTrue(i in velocities)
self.assertEqual(velocities[i], [i * 1.0, i * 1.0, i * 1.0])
def _write_example_file(filename, contents):
with open(filename, "w") as text_file:
text_file.write(contents)
_data_file_contents = """LAMMPS data file via write_data, version 28 Jun 2014, timestep = 0
4 atoms
3 atom types
0.0000000000000000e+00 2.5687134504920127e+01 xlo xhi
-2.2245711031688635e-03 2.2247935602791809e+01 ylo yhi
-3.2108918131150160e-01 3.2108918131150160e-01 zlo zhi
Masses
1 3
2 42
3 1
Pair Coeffs # lj/cut
1 1 1
2 1 1
3 1 1
Atoms # atomic
1 1 1.0000000000000000e+00 1.0000000000000000e+00 1.0000000000000000e+00 0 0 0
2 2 2.0000000000000000e+00 2.0000000000000000e+00 2.0000000000000000e+00 0 0 0
3 3 3.0000000000000000e+00 3.0000000000000000e+00 3.0000000000000000e+00 0 0 0
4 2 4.0000000000000000e+00 4.0000000000000000e+00 4.0000000000000000e+00 0 0 0
Velocities
1 1.0000000000000000e+00 1.0000000000000000e+00 1.0000000000000000e+00
2 2.0000000000000000e+00 2.0000000000000000e+00 2.0000000000000000e+00
3 3.0000000000000000e+00 3.0000000000000000e+00 3.0000000000000000e+00
4 4.0000000000000000e+00 4.0000000000000000e+00 4.0000000000000000e+00"""
if __name__ == '__main__':
unittest.main()
| simphony/simphony-lammps-md | simlammps/io/tests/test_lammps_simple_data_handler.py | Python | bsd-2-clause | 2,811 |
from django.conf import settings
class Breadcrumb(object):
"""
A single breadcrumb, which is a 1:1 mapping to a view.
This is simply a wrapper class for the template tag.
"""
def __init__(self, name, url):
self.name = name
self.url = url
class Breadcrumbs(object):
"""
The site's breadcrumbs. An instance of this class is added to each
request, and can be called to add breadcrumbs to the template context.
def some_view(request):
request.breadcrumbs('Title', request.path_info)
request.breadcrumbs('Subtitle', ...)
...
    You may prevent the 'Home' link being added by setting BREADCRUMBS_HOME_LINK
    to False (defaults to True).
This class supports iteration, and is used as such in the template tag.
"""
_bc = []
def __init__(self, request):
self._request = request
# We must clear the list on every request or we will get duplicates
del self._bc[:]
# By default, add a link to the homepage. This can be disabled or
# configured in the project settings.
if getattr(settings, 'BREADCRUMBS_HOME_LINK', True):
home_name = getattr(settings, 'BREADCRUMBS_HOME_LINK_NAME', 'Home')
home_url = getattr(settings, 'BREADCRUMBS_HOME_LINK_URL', '/')
self._add(home_name, home_url)
def __call__(self, *args, **kwargs):
return self._add(*args, **kwargs)
def __iter__(self):
return iter(self._bc)
def __len__(self):
return len(self._bc)
def _add(self, name, url):
self._bc.append(Breadcrumb(name, url))
class BreadcrumbMiddleware(object):
"""
Middleware to add breadcrumbs into every request.
Add 'breadcrumbs3.middleware.BreadcrumbMiddleware' to MIDDLEWARE_CLASSES
and make sure 'django.template.context_processors.request' is in
TEMPLATES.context_processors.
"""
def process_request(self, request):
request.breadcrumbs = Breadcrumbs(request)
| sjkingo/django-breadcrumbs3 | breadcrumbs3/middleware.py | Python | bsd-2-clause | 2,010 |
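# Editor's sketch of a project configuration for the middleware above; the
# values shown are the defaults the code itself falls back to, included only
# for illustration (this snippet is not part of the original package).

# settings.py
MIDDLEWARE_CLASSES = [
    # ... Django's standard middleware ...
    'breadcrumbs3.middleware.BreadcrumbMiddleware',
]
# 'django.template.context_processors.request' must also be listed in
# TEMPLATES[0]['OPTIONS']['context_processors'] so templates can see request.

# Optional overrides for the automatic 'Home' crumb:
BREADCRUMBS_HOME_LINK = True          # set False to suppress the crumb entirely
BREADCRUMBS_HOME_LINK_NAME = 'Home'   # label of the first crumb
BREADCRUMBS_HOME_LINK_URL = '/'       # URL the first crumb points at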
# -*- coding: utf-8 -*-
import json
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import last_modified as cache_last_modified
from django.views.decorators.cache import never_cache as force_cache_validation
from django.core.cache import get_cache
from django.shortcuts import redirect
from mapentity.views import (MapEntityLayer, MapEntityList, MapEntityJsonList,
MapEntityDetail, MapEntityDocument, MapEntityCreate, MapEntityUpdate,
MapEntityDelete, MapEntityFormat,
HttpJSONResponse)
from geotrek.authent.decorators import same_structure_required
from geotrek.common.utils import classproperty
from .models import Path, Trail, Topology
from .forms import PathForm, TrailForm
from .filters import PathFilterSet, TrailFilterSet
from . import graph as graph_lib
logger = logging.getLogger(__name__)
@login_required
def last_list(request):
last = request.session.get('last_list') # set in MapEntityList
if not last:
return redirect('core:path_list')
return redirect(last)
home = last_list
class CreateFromTopologyMixin(object):
def on_topology(self):
pk = self.request.GET.get('topology')
if pk:
try:
return Topology.objects.existing().get(pk=pk)
except Topology.DoesNotExist:
logger.warning("Intervention on unknown topology %s" % pk)
return None
def get_initial(self):
initial = super(CreateFromTopologyMixin, self).get_initial()
# Create intervention with an existing topology as initial data
topology = self.on_topology()
if topology:
initial['topology'] = topology.serialize(with_pk=False)
return initial
class PathLayer(MapEntityLayer):
model = Path
properties = ['name']
class PathList(MapEntityList):
queryset = Path.objects.prefetch_related('networks').select_related('stake')
filterform = PathFilterSet
@classproperty
def columns(cls):
columns = ['id', 'name', 'networks', 'stake']
if settings.TRAIL_MODEL_ENABLED:
columns.append('trails')
return columns
def get_queryset(self):
"""
denormalize ``trail`` column from list.
"""
qs = super(PathList, self).get_queryset()
denormalized = {}
if settings.TRAIL_MODEL_ENABLED:
paths_id = qs.values_list('id', flat=True)
paths_trails = Trail.objects.filter(aggregations__path__id__in=paths_id)
by_id = dict([(trail.id, trail) for trail in paths_trails])
trails_paths_ids = paths_trails.values_list('id', 'aggregations__path__id')
for trail_id, path_id in trails_paths_ids:
denormalized.setdefault(path_id, []).append(by_id[trail_id])
for path in qs:
path_trails = denormalized.get(path.id, [])
setattr(path, '_trails', path_trails)
yield path
class PathJsonList(MapEntityJsonList, PathList):
pass
class PathFormatList(MapEntityFormat, PathList):
pass
class PathDetail(MapEntityDetail):
model = Path
def context_data(self, *args, **kwargs):
context = super(PathDetail, self).context_data(*args, **kwargs)
context['can_edit'] = self.get_object().same_structure(self.request.user)
return context
class PathDocument(MapEntityDocument):
model = Path
def get_context_data(self, *args, **kwargs):
self.get_object().prepare_elevation_chart(self.request.build_absolute_uri('/'))
return super(PathDocument, self).get_context_data(*args, **kwargs)
class PathCreate(MapEntityCreate):
model = Path
form_class = PathForm
class PathUpdate(MapEntityUpdate):
model = Path
form_class = PathForm
@same_structure_required('core:path_detail')
def dispatch(self, *args, **kwargs):
return super(PathUpdate, self).dispatch(*args, **kwargs)
class PathDelete(MapEntityDelete):
model = Path
@same_structure_required('core:path_detail')
def dispatch(self, *args, **kwargs):
return super(PathDelete, self).dispatch(*args, **kwargs)
@login_required
@cache_last_modified(lambda x: Path.latest_updated())
@force_cache_validation
def get_graph_json(request):
cache = get_cache('fat')
key = 'path_graph_json'
result = cache.get(key)
latest = Path.latest_updated()
if result and latest:
cache_latest, json_graph = result
# Not empty and still valid
if cache_latest and cache_latest >= latest:
return HttpJSONResponse(json_graph)
# cache does not exist or is not up to date
# rebuild the graph and cache the json
graph = graph_lib.graph_edges_nodes_of_qs(Path.objects.all())
json_graph = json.dumps(graph)
cache.set(key, (latest, json_graph))
return HttpJSONResponse(json_graph)
class TrailLayer(MapEntityLayer):
queryset = Trail.objects.existing()
properties = ['name']
class TrailList(MapEntityList):
queryset = Trail.objects.existing()
filterform = TrailFilterSet
columns = ['id', 'name', 'departure', 'arrival']
class TrailDetail(MapEntityDetail):
queryset = Trail.objects.existing()
def context_data(self, *args, **kwargs):
context = super(TrailDetail, self).context_data(*args, **kwargs)
context['can_edit'] = self.get_object().same_structure(self.request.user)
return context
class TrailDocument(MapEntityDocument):
queryset = Trail.objects.existing()
class TrailCreate(CreateFromTopologyMixin, MapEntityCreate):
model = Trail
form_class = TrailForm
class TrailUpdate(MapEntityUpdate):
queryset = Trail.objects.existing()
form_class = TrailForm
@same_structure_required('core:trail_detail')
def dispatch(self, *args, **kwargs):
return super(TrailUpdate, self).dispatch(*args, **kwargs)
class TrailDelete(MapEntityDelete):
queryset = Trail.objects.existing()
@same_structure_required('core:trail_detail')
def dispatch(self, *args, **kwargs):
return super(TrailDelete, self).dispatch(*args, **kwargs)
| camillemonchicourt/Geotrek | geotrek/core/views.py | Python | bsd-2-clause | 6,257 |
class LimeError(Exception):
"""Raise for errors"""
| marcotcr/lime | lime/exceptions.py | Python | bsd-2-clause | 55 |
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
YOUTUBE_URL = "https://www.youtube.com/watch?v={0}"
_url_re = re.compile(r'http(s)?://www\.skai.gr/.*')
_youtube_id = re.compile(r'<span\s+itemprop="contentUrl"\s+href="(.*)"></span>', re.MULTILINE)
_youtube_url_schema = validate.Schema(
validate.all(
validate.transform(_youtube_id.search),
validate.any(
None,
validate.all(
validate.get(1),
validate.text
)
)
)
)
class Skai(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_streams(self):
channel_id = self.session.http.get(self.url, schema=_youtube_url_schema)
if channel_id:
return self.session.streams(YOUTUBE_URL.format(channel_id))
__plugin__ = Skai
| back-to/streamlink | src/streamlink/plugins/skai.py | Python | bsd-2-clause | 889 |
from django.contrib.auth import authenticate, login, logout
from django.shortcuts import redirect, render
from users.forms import UserCreationForm
def create_user_account(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
primary_email = form.cleaned_data['primary_email']
password = form.cleaned_data['password1']
user = authenticate(username=primary_email, password=password)
login(request, user)
return redirect('/')
else:
form = UserCreationForm()
return render(request, 'users/create_user_account.html', {'form': form})
| kbarnes3/BaseDjangoSite | web/users/views.py | Python | bsd-2-clause | 696 |
# The Nexus software is licensed under the BSD 2-Clause license.
#
# You should have recieved a copy of this license with the software.
# If you did not, you can find one at the following link.
#
# http://opensource.org/licenses/bsd-license.php
entitylist.append(["confetti",(x,y,z),6,6])
| TheArchives/Nexus | core/entities/confetti_create.py | Python | bsd-2-clause | 291 |
from .api import *
from .reduction import Reduce
from .cudadrv.devicearray import (device_array, device_array_like, pinned,
pinned_array, pinned_array_like, to_device, auto_device)
from .cudadrv import devicearray
from .cudadrv.devices import require_context, gpus
from .cudadrv.devices import get_context as current_context
from .cudadrv.runtime import runtime
from numba.core import config
reduce = Reduce
# Ensure that any user code attempting to import cudadrv etc. gets the
# simulator's version and not the real version if the simulator is enabled.
if config.ENABLE_CUDASIM:
import sys
from numba.cuda.simulator import cudadrv
sys.modules['numba.cuda.cudadrv'] = cudadrv
sys.modules['numba.cuda.cudadrv.devicearray'] = cudadrv.devicearray
sys.modules['numba.cuda.cudadrv.devices'] = cudadrv.devices
sys.modules['numba.cuda.cudadrv.driver'] = cudadrv.driver
sys.modules['numba.cuda.cudadrv.runtime'] = cudadrv.runtime
sys.modules['numba.cuda.cudadrv.drvapi'] = cudadrv.drvapi
sys.modules['numba.cuda.cudadrv.nvvm'] = cudadrv.nvvm
from . import compiler
sys.modules['numba.cuda.compiler'] = compiler
| sklam/numba | numba/cuda/simulator/__init__.py | Python | bsd-2-clause | 1,170 |
from confduino.boardlist import boards_txt
from entrypoint2 import entrypoint
import logging
log = logging.getLogger(__name__)
@entrypoint
def remove_board(board_id):
"""remove board.
:param board_id: board id (e.g. 'diecimila')
:rtype: None
"""
log.debug('remove %s', board_id)
lines = boards_txt().lines()
lines = filter(lambda x: not x.strip().startswith(board_id + '.'), lines)
boards_txt().write_lines(lines)
| ponty/confduino | confduino/boardremove.py | Python | bsd-2-clause | 452 |
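# Editor's usage sketch for remove_board above; not part of the original
# module. entrypoint2 also exposes the function as a command line tool, so a
# plausible invocation (assuming boards.txt is where confduino expects it) is:
#
#   $ python -m confduino.boardremove diecimila
#
# or, from Python:
from confduino.boardremove import remove_board

remove_board('diecimila')  # strips every 'diecimila.*' line from boards.txt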
import time
import os
import threading
import collections
from xlog import getLogger
xlog = getLogger("cloudflare_front")
xlog.set_buffer(500)
import simple_http_client
from config import config
import http_dispatcher
import connect_control
import check_ip
class Front(object):
name = "cloudflare_front"
def __init__(self):
self.dispatchs = {}
threading.Thread(target=self.update_front_domains).start()
self.last_success_time = time.time()
self.last_fail_time = 0
self.continue_fail_num = 0
self.success_num = 0
self.fail_num = 0
self.last_host = "center.xx-net.net"
self.rtts = collections.deque([(0, time.time())])
self.rtts_lock = threading.Lock()
self.traffics = collections.deque()
self.traffics_lock = threading.Lock()
self.recent_sent = 0
self.recent_received = 0
self.total_sent = 0
self.total_received = 0
threading.Thread(target=self.debug_data_clearup_thread).start()
@staticmethod
def update_front_domains():
next_update_time = time.time()
while connect_control.keep_running:
if time.time() < next_update_time:
time.sleep(4)
continue
try:
timeout = 30
if config.getint("proxy", "enable", 0):
client = simple_http_client.Client(proxy={
"type": config.get("proxy", "type", ""),
"host": config.get("proxy", "host", ""),
"port": int(config.get("proxy", "port", "0")),
"user": config.get("proxy", "user", ""),
"pass": config.get("proxy", "passwd", ""),
}, timeout=timeout)
else:
client = simple_http_client.Client(timeout=timeout)
url = "https://raw.githubusercontent.com/XX-net/XX-Net/master/code/default/x_tunnel/local/cloudflare_front/front_domains.json"
response = client.request("GET", url)
if response.status != 200:
xlog.warn("update front domains fail:%d", response.status)
raise Exception("status:%r", response.status)
need_update = True
front_domains_fn = os.path.join(config.DATA_PATH, "front_domains.json")
if os.path.exists(front_domains_fn):
with open(front_domains_fn, "r") as fd:
old_content = fd.read()
if response.text == old_content:
need_update = False
if need_update:
with open(front_domains_fn, "w") as fd:
fd.write(response.text)
check_ip.update_front_domains()
next_update_time = time.time() + (4 * 3600)
xlog.info("updated cloudflare front domains from github.")
except Exception as e:
next_update_time = time.time() + (1800)
xlog.debug("updated cloudflare front domains from github fail:%r", e)
def log_debug_data(self, rtt, sent, received):
now = time.time()
self.rtts.append((rtt, now))
with self.traffics_lock:
self.traffics.append((sent, received, now))
self.recent_sent += sent
self.recent_received += received
self.total_sent += sent
self.total_received += received
def get_rtt(self):
now = time.time()
while len(self.rtts) > 1:
with self.rtts_lock:
rtt, log_time = rtt_log = max(self.rtts)
if now - log_time > 5:
self.rtts.remove(rtt_log)
continue
return rtt
return self.rtts[0][0]
def debug_data_clearup_thread(self):
while True:
now = time.time()
with self.rtts_lock:
if len(self.rtts) > 1 and now - self.rtts[0][-1] > 5:
self.rtts.popleft()
with self.traffics_lock:
if self.traffics and now - self.traffics[0][-1] > 60:
sent, received, _ = self.traffics.popleft()
self.recent_sent -= sent
self.recent_received -= received
time.sleep(1)
def worker_num(self):
host = self.last_host
if host not in self.dispatchs:
self.dispatchs[host] = http_dispatcher.HttpsDispatcher(host, self.log_debug_data)
dispatcher = self.dispatchs[host]
return len(dispatcher.workers)
def get_score(self, host=None):
now = time.time()
if now - self.last_fail_time < 60 and \
self.continue_fail_num > 10:
return None
if host is None:
host = self.last_host
if host not in self.dispatchs:
self.dispatchs[host] = http_dispatcher.HttpsDispatcher(host, self.log_debug_data)
dispatcher = self.dispatchs[host]
worker = dispatcher.get_worker(nowait=True)
if not worker:
return None
return worker.get_score()
def request(self, method, host, path="/", headers={}, data="", timeout=120):
if host not in self.dispatchs:
self.dispatchs[host] = http_dispatcher.HttpsDispatcher(host, self.log_debug_data)
self.last_host = host
dispatcher = self.dispatchs[host]
response = dispatcher.request(method, host, path, dict(headers), data, timeout=timeout)
if not response:
xlog.warn("req %s get response timeout", path)
return "", 602, {}
status = response.status
if status not in [200, 405]:
# xlog.warn("front request %s %s%s fail, status:%d", method, host, path, status)
self.fail_num += 1
self.continue_fail_num += 1
self.last_fail_time = time.time()
else:
self.success_num += 1
self.continue_fail_num = 0
content = response.task.read_all()
if status == 200:
xlog.debug("%s %s%s status:%d trace:%s", method, response.worker.ssl_sock.host, path, status,
response.task.get_trace())
else:
xlog.warn("%s %s%s status:%d trace:%s", method, response.worker.ssl_sock.host, path, status,
response.task.get_trace())
return content, status, response
def stop(self):
connect_control.keep_running = False
front = Front()
| qqzwc/XX-Net | code/default/x_tunnel/local/cloudflare_front/front.py | Python | bsd-2-clause | 6,638 |
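# Editor's usage sketch for the Front dispatcher above; not part of the
# original module, and the import path is an assumption (the file defines a
# module-level singleton called `front`). "center.xx-net.net" is simply the
# host the class already uses as its initial last_host.
from front import front

# request() returns (body, HTTP status, response); the module reports status
# 602 when no worker produced a response before the timeout.
content, status, response = front.request("GET", "center.xx-net.net", "/", timeout=30)
if status == 200:
    print("got %d bytes, recent max rtt=%r" % (len(content), front.get_rtt()))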
import warnings
import numpy as np
import numbers
import collections
from warnings import warn
from scipy.sparse import csr_matrix
from scipy.spatial.distance import cdist
from menpo.transform import WithDims
from .base import Shape
def bounding_box(closest_to_origin, opposite_corner):
r"""
Return a bounding box from two corner points as a directed graph.
The the first point (0) should be nearest the origin.
In the case of an image, this ordering would appear as:
::
0<--3
| ^
| |
v |
1-->2
In the case of a pointcloud, the ordering will appear as:
::
3<--2
| ^
| |
v |
0-->1
Parameters
----------
closest_to_origin : (`float`, `float`)
Two floats representing the coordinates closest to the origin.
Represented by (0) in the graph above. For an image, this will
be the top left. For a pointcloud, this will be the bottom left.
opposite_corner : (`float`, `float`)
Two floats representing the coordinates opposite the corner closest
to the origin.
Represented by (2) in the graph above. For an image, this will
be the bottom right. For a pointcloud, this will be the top right.
Returns
-------
bounding_box : :map:`PointDirectedGraph`
The axis aligned bounding box from the two given corners.
"""
from .graph import PointDirectedGraph
if len(closest_to_origin) != 2 or len(opposite_corner) != 2:
raise ValueError('Only 2D bounding boxes can be created.')
adjacency_matrix = csr_matrix(([1] * 4, ([0, 1, 2, 3], [1, 2, 3, 0])),
shape=(4, 4))
box = np.array([closest_to_origin,
[opposite_corner[0], closest_to_origin[1]],
opposite_corner,
[closest_to_origin[0], opposite_corner[1]]], dtype=np.float)
return PointDirectedGraph(box, adjacency_matrix, copy=False)
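# Editor's worked example for bounding_box (comment only, not in the original
# source, so nothing executes at import time):
#
#   >>> bb = bounding_box((0, 0), (10, 20))
#   >>> bb.points.tolist()
#   [[0.0, 0.0], [10.0, 0.0], [10.0, 20.0], [0.0, 20.0]]
#
# i.e. the corners are visited along the directed cycle sketched in the
# docstring above.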
def bounding_cuboid(near_closest_to_origin, far_opposite_corner):
r"""
Return a bounding cuboid from the near closest and far opposite
corners as a directed graph.
Parameters
----------
near_closest_to_origin : (`float`, `float`, `float`)
Three floats representing the coordinates of the near corner closest to
the origin.
far_opposite_corner : (`float`, `float`, `float`)
Three floats representing the coordinates of the far opposite corner
compared to near_closest_to_origin.
Returns
-------
bounding_box : :map:`PointDirectedGraph`
The axis aligned bounding cuboid from the two given corners.
"""
from .graph import PointDirectedGraph
if len(near_closest_to_origin) != 3 or len(far_opposite_corner) != 3:
raise ValueError('Only 3D bounding cuboids can be created.')
adjacency_matrix = csr_matrix(
([1] * 12,
([0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7],
[1, 2, 3, 0, 4, 5, 6, 7, 5, 6, 7, 4])), shape=(8, 8))
cuboid = np.array(
[near_closest_to_origin, [far_opposite_corner[0],
near_closest_to_origin[1],
near_closest_to_origin[2]],
[far_opposite_corner[0],
far_opposite_corner[1],
near_closest_to_origin[2]], [near_closest_to_origin[0],
far_opposite_corner[1],
near_closest_to_origin[2]],
[near_closest_to_origin[0],
near_closest_to_origin[1],
far_opposite_corner[2]], [far_opposite_corner[0],
near_closest_to_origin[1],
far_opposite_corner[2]],
far_opposite_corner, [near_closest_to_origin[0],
far_opposite_corner[1],
far_opposite_corner[2]]], dtype=np.float)
return PointDirectedGraph(cuboid, adjacency_matrix, copy=False)
class PointCloud(Shape):
r"""
An N-dimensional point cloud. This is internally represented as an `ndarray`
of shape ``(n_points, n_dims)``. This class is important for dealing
with complex functionality such as viewing and representing metadata such
as landmarks.
Currently only 2D and 3D pointclouds are viewable.
Parameters
----------
points : ``(n_points, n_dims)`` `ndarray`
The array representing the points.
copy : `bool`, optional
If ``False``, the points will not be copied on assignment. Note that
this will miss out on additional checks. Further note that we still
demand that the array is C-contiguous - if it isn't, a copy will be
generated anyway.
In general this should only be used if you know what you are doing.
"""
def __init__(self, points, copy=True):
super(PointCloud, self).__init__()
if not copy:
if not points.flags.c_contiguous:
warn('The copy flag was NOT honoured. A copy HAS been made. '
'Please ensure the data you pass is C-contiguous.')
points = np.array(points, copy=True, order='C')
else:
points = np.array(points, copy=True, order='C')
self.points = points
@classmethod
def init_2d_grid(cls, shape, spacing=None):
r"""
Create a pointcloud that exists on a regular 2D grid. The first
dimension is the number of rows in the grid and the second dimension
of the shape is the number of columns. ``spacing`` optionally allows
the definition of the distance between points (uniform over points).
The spacing may be different for rows and columns.
Parameters
----------
shape : `tuple` of 2 `int`
The size of the grid to create, this defines the number of points
across each dimension in the grid. The first element is the number
of rows and the second is the number of columns.
spacing : `int` or `tuple` of 2 `int`, optional
The spacing between points. If a single `int` is provided, this
is applied uniformly across each dimension. If a `tuple` is
provided, the spacing is applied non-uniformly as defined e.g.
``(2, 3)`` gives a spacing of 2 for the rows and 3 for the
columns.
Returns
-------
shape_cls : `type(cls)`
A PointCloud or subclass arranged in a grid.
"""
if len(shape) != 2:
raise ValueError('shape must be 2D.')
grid = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]),
indexing='ij')
points = np.require(np.concatenate(grid).reshape([2, -1]).T,
dtype=np.float64, requirements=['C'])
if spacing is not None:
if not (isinstance(spacing, numbers.Number) or
isinstance(spacing, collections.Sequence)):
raise ValueError('spacing must be either a single number '
'to be applied over each dimension, or a 2D '
'sequence of numbers.')
if isinstance(spacing, collections.Sequence) and len(spacing) != 2:
raise ValueError('spacing must be 2D.')
points *= np.asarray(spacing, dtype=np.float64)
return cls(points, copy=False)
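    # Editor's worked example for init_2d_grid (comment only; not in the
    # original source): a (2, 3) grid with spacing (2, 3) yields, row-major,
    #
    #   >>> PointCloud.init_2d_grid((2, 3), spacing=(2, 3)).points.tolist()
    #   [[0.0, 0.0], [0.0, 3.0], [0.0, 6.0], [2.0, 0.0], [2.0, 3.0], [2.0, 6.0]]
    #
    # i.e. the first coordinate walks the rows (spacing 2) and the second the
    # columns (spacing 3).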
@classmethod
def init_from_depth_image(cls, depth_image):
r"""
Return a 3D point cloud from the given depth image. The depth image
is assumed to represent height/depth values and the XY coordinates
        are assumed to be unit spaced and represent image coordinates. This is
particularly useful for visualising depth values that have been
recovered from images.
Parameters
----------
depth_image : :map:`Image` or subclass
A single channel image that contains depth values - as commonly
returned by RGBD cameras, for example.
Returns
-------
depth_cloud : ``type(cls)``
A new 3D PointCloud with unit XY coordinates and the given depth
values as Z coordinates.
"""
from menpo.image import MaskedImage
new_pcloud = cls.init_2d_grid(depth_image.shape)
if isinstance(depth_image, MaskedImage):
new_pcloud = new_pcloud.from_mask(depth_image.mask.as_vector())
return cls(np.hstack([new_pcloud.points,
depth_image.as_vector(keep_channels=True).T]),
copy=False)
def with_dims(self, dims):
r"""
Return a copy of this shape with only particular dimensions retained.
Parameters
----------
dims : valid numpy array slice
The slice that will be used on the dimensionality axis of the shape
under transform. For example, to go from a 3D shape to a 2D one,
[0, 1] could be provided or np.array([True, True, False]).
Returns
-------
copy of self, with only the requested dims
"""
return WithDims(dims).apply(self)
@property
def lms(self):
"""Deprecated.
Maintained for compatibility, will be removed in a future version.
Returns a copy of this object, which previously would have held
the 'underlying' :map:`PointCloud` subclass.
:type: self
"""
from menpo.base import MenpoDeprecationWarning
warnings.warn('The .lms property is deprecated. LandmarkGroups are '
'now shapes themselves - so you can use them directly '
'anywhere you previously used .lms.'
'Simply remove ".lms" from your code and things '
'will work as expected (and this warning will go away)',
MenpoDeprecationWarning)
return self.copy()
@property
def n_points(self):
r"""
The number of points in the pointcloud.
:type: `int`
"""
return self.points.shape[0]
@property
def n_dims(self):
r"""
The number of dimensions in the pointcloud.
:type: `int`
"""
return self.points.shape[1]
def h_points(self):
r"""
        Convert pointcloud to a homogeneous array: ``(n_dims + 1, n_points)``
        :type: ``(n_dims + 1, n_points)`` `ndarray`
"""
return np.concatenate((self.points.T,
np.ones(self.n_points,
dtype=self.points.dtype)[None, :]))
def centre(self):
r"""
The mean of all the points in this PointCloud (centre of mass).
Returns
-------
centre : ``(n_dims)`` `ndarray`
The mean of this PointCloud's points.
"""
return np.mean(self.points, axis=0)
def centre_of_bounds(self):
r"""
The centre of the absolute bounds of this PointCloud. Contrast with
:meth:`centre`, which is the mean point position.
Returns
-------
centre : ``n_dims`` `ndarray`
The centre of the bounds of this PointCloud.
"""
min_b, max_b = self.bounds()
return (min_b + max_b) / 2.0
def _as_vector(self):
r"""
Returns a flattened representation of the pointcloud.
Note that the flattened representation is of the form
``[x0, y0, x1, y1, ....., xn, yn]`` for 2D.
Returns
-------
flattened : ``(n_points,)`` `ndarray`
The flattened points.
"""
return self.points.ravel()
def tojson(self):
r"""
Convert this :map:`PointCloud` to a dictionary representation suitable
for inclusion in the LJSON landmark format.
Returns
-------
json : `dict`
Dictionary with ``points`` keys.
"""
return {
'labels': [],
'landmarks': {
'points': self.points.tolist()
}
}
def _from_vector_inplace(self, vector):
r"""
Updates the points of this PointCloud in-place with the reshaped points
from the provided vector. Note that the vector should have the form
``[x0, y0, x1, y1, ....., xn, yn]`` for 2D.
Parameters
----------
vector : ``(n_points,)`` `ndarray`
The vector from which to create the points' array.
"""
self.points = vector.reshape([-1, self.n_dims])
def __str__(self):
return '{}: n_points: {}, n_dims: {}'.format(type(self).__name__,
self.n_points,
self.n_dims)
def bounds(self, boundary=0):
r"""
The minimum to maximum extent of the PointCloud. An optional boundary
argument can be provided to expand the bounds by a constant margin.
Parameters
----------
boundary : `float`
            An optional padding distance that is added to the bounds. Default
is ``0``, meaning the max/min of tightest possible containing
square/cube/hypercube is returned.
Returns
-------
min_b : ``(n_dims,)`` `ndarray`
The minimum extent of the :map:`PointCloud` and boundary along
each dimension
max_b : ``(n_dims,)`` `ndarray`
The maximum extent of the :map:`PointCloud` and boundary along
each dimension
"""
min_b = np.min(self.points, axis=0) - boundary
max_b = np.max(self.points, axis=0) + boundary
return min_b, max_b
def range(self, boundary=0):
r"""
The range of the extent of the PointCloud.
Parameters
----------
boundary : `float`
            An optional padding distance that is used to extend the bounds
from which the range is computed. Default is ``0``, no extension
is performed.
Returns
-------
range : ``(n_dims,)`` `ndarray`
The range of the :map:`PointCloud` extent in each dimension.
"""
min_b, max_b = self.bounds(boundary)
return max_b - min_b
def bounding_box(self):
r"""
Return a bounding box from two corner points as a directed graph.
In the case of a 2D pointcloud, first point (0) should be nearest the
origin. In the case of an image, this ordering would appear as:
::
0<--3
| ^
| |
v |
1-->2
In the case of a pointcloud, the ordering will appear as:
::
3<--2
| ^
| |
v |
0-->1
In the case of a 3D pointcloud, the first point (0) should be the
near closest to the origin and the second point is the far opposite
corner.
Returns
-------
bounding_box : :map:`PointDirectedGraph`
The axis aligned bounding box of the PointCloud.
"""
if self.n_dims != 2 and self.n_dims != 3:
raise ValueError('Bounding boxes are only supported for 2D or 3D '
'pointclouds.')
min_p, max_p = self.bounds()
if self.n_dims == 2:
return bounding_box(min_p, max_p)
elif self.n_dims == 3:
return bounding_cuboid(min_p, max_p)
def _view_2d(self, figure_id=None, new_figure=False, image_view=True,
render_markers=True, marker_style='o', marker_size=5,
marker_face_colour='r', marker_edge_colour='k',
marker_edge_width=1., render_numbering=False,
numbers_horizontal_align='center',
numbers_vertical_align='bottom',
numbers_font_name='sans-serif', numbers_font_size=10,
numbers_font_style='normal', numbers_font_weight='normal',
numbers_font_colour='k', render_axes=True,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
axes_x_limits=None, axes_y_limits=None, axes_x_ticks=None,
axes_y_ticks=None, figure_size=(10, 8), label=None, **kwargs):
r"""
Visualization of the PointCloud in 2D.
        Parameters
        ----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
image_view : `bool`, optional
If ``True`` the PointCloud will be viewed as if it is in the image
coordinate system.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : See Below, optional
The style of the markers. Example options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : See Below, optional
The face (filling) colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_colour : See Below, optional
The edge colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float`, optional
The width of the markers' edge.
render_numbering : `bool`, optional
If ``True``, the landmarks will be numbered.
numbers_horizontal_align : ``{center, right, left}``, optional
The horizontal alignment of the numbers' texts.
numbers_vertical_align : ``{center, top, bottom, baseline}``, optional
The vertical alignment of the numbers' texts.
numbers_font_name : See Below, optional
The font of the numbers. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
numbers_font_size : `int`, optional
The font size of the numbers.
numbers_font_style : ``{normal, italic, oblique}``, optional
The font style of the numbers.
numbers_font_weight : See Below, optional
The font weight of the numbers.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
numbers_font_colour : See Below, optional
The font colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : {``normal``, ``italic``, ``oblique``}, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the PointCloud as a percentage of the PointCloud's
width. If `tuple` or `list`, then it defines the axis limits. If
``None``, then the limits are set automatically.
        axes_y_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the PointCloud as a percentage of the PointCloud's
height. If `tuple` or `list`, then it defines the axis limits. If
``None``, then the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None``, optional
The size of the figure in inches.
label : `str`, optional
The name entry in case of a legend.
Returns
-------
viewer : :map:`PointGraphViewer2d`
The viewer object.
"""
from menpo.visualize.base import PointGraphViewer2d
adjacency_array = np.empty(0)
renderer = PointGraphViewer2d(figure_id, new_figure,
self.points, adjacency_array)
renderer.render(
image_view=image_view, render_lines=False, line_colour='b',
line_style='-', line_width=1., render_markers=render_markers,
marker_style=marker_style, marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour, render_axes=render_axes,
axes_font_name=axes_font_name, axes_font_size=axes_font_size,
axes_font_style=axes_font_style, axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks, axes_y_ticks=axes_y_ticks,
figure_size=figure_size, label=label)
return renderer
def _view_landmarks_2d(self, group=None, with_labels=None,
without_labels=None, figure_id=None,
new_figure=False, image_view=True, render_lines=True,
line_colour=None, line_style='-', line_width=1,
render_markers=True, marker_style='o',
marker_size=5, marker_face_colour=None,
marker_edge_colour=None, marker_edge_width=1.,
render_numbering=False,
numbers_horizontal_align='center',
numbers_vertical_align='bottom',
numbers_font_name='sans-serif', numbers_font_size=10,
numbers_font_style='normal',
numbers_font_weight='normal',
numbers_font_colour='k', render_legend=False,
legend_title='', legend_font_name='sans-serif',
legend_font_style='normal', legend_font_size=10,
legend_font_weight='normal',
legend_marker_scale=None, legend_location=2,
legend_bbox_to_anchor=(1.05, 1.),
legend_border_axes_pad=None, legend_n_columns=1,
legend_horizontal_spacing=None,
legend_vertical_spacing=None, legend_border=True,
legend_border_padding=None, legend_shadow=False,
legend_rounded_corners=False, render_axes=False,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
axes_x_limits=None, axes_y_limits=None,
axes_x_ticks=None, axes_y_ticks=None,
figure_size=(10, 8)):
"""
Visualize the landmarks. This method will appear on the Image as
``view_landmarks`` if the Image is 2D.
Parameters
----------
group : `str` or``None`` optional
The landmark group to be visualized. If ``None`` and there are more
than one landmark groups, an error is raised.
with_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, only show the given label(s). Should **not** be
used with the ``without_labels`` kwarg.
without_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, show all except the given label(s). Should **not**
be used with the ``with_labels`` kwarg.
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
image_view : `bool`, optional
If ``True`` the PointCloud will be viewed as if it is in the image
coordinate system.
render_lines : `bool`, optional
If ``True``, the edges will be rendered.
line_colour : See Below, optional
The colour of the lines.
Example options::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
line_style : ``{-, --, -., :}``, optional
The style of the lines.
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : See Below, optional
The style of the markers. Example options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : See Below, optional
The face (filling) colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_colour : See Below, optional
The edge colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float`, optional
The width of the markers' edge.
render_numbering : `bool`, optional
If ``True``, the landmarks will be numbered.
numbers_horizontal_align : ``{center, right, left}``, optional
The horizontal alignment of the numbers' texts.
numbers_vertical_align : ``{center, top, bottom, baseline}``, optional
The vertical alignment of the numbers' texts.
numbers_font_name : See Below, optional
The font of the numbers. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
numbers_font_size : `int`, optional
The font size of the numbers.
numbers_font_style : ``{normal, italic, oblique}``, optional
The font style of the numbers.
numbers_font_weight : See Below, optional
The font weight of the numbers.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
numbers_font_colour : See Below, optional
The font colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_legend : `bool`, optional
If ``True``, the legend will be rendered.
legend_title : `str`, optional
The title of the legend.
legend_font_name : See below, optional
The font of the legend. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
legend_font_style : ``{normal, italic, oblique}``, optional
The font style of the legend.
legend_font_size : `int`, optional
The font size of the legend.
legend_font_weight : See Below, optional
The font weight of the legend.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
legend_marker_scale : `float`, optional
            The relative size of the legend markers with respect to the original
            markers.
legend_location : `int`, optional
The location of the legend. The predefined values are:
=============== ==
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== ==
legend_bbox_to_anchor : (`float`, `float`) `tuple`, optional
The bbox that the legend will be anchored.
legend_border_axes_pad : `float`, optional
The pad between the axes and legend border.
legend_n_columns : `int`, optional
The number of the legend's columns.
legend_horizontal_spacing : `float`, optional
The spacing between the columns.
legend_vertical_spacing : `float`, optional
The vertical space between the legend entries.
legend_border : `bool`, optional
If ``True``, a frame will be drawn around the legend.
legend_border_padding : `float`, optional
The fractional whitespace inside the legend border.
legend_shadow : `bool`, optional
If ``True``, a shadow will be drawn behind legend.
legend_rounded_corners : `bool`, optional
If ``True``, the frame's corners will be rounded (fancybox).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : ``{normal, italic, oblique}``, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold,demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the PointCloud as a percentage of the PointCloud's
width. If `tuple` or `list`, then it defines the axis limits. If
``None``, then the limits are set automatically.
        axes_y_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the PointCloud as a percentage of the PointCloud's
height. If `tuple` or `list`, then it defines the axis limits. If
``None``, then the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
        figure_size : (`float`, `float`) `tuple` or ``None``, optional
The size of the figure in inches.
Raises
------
ValueError
If both ``with_labels`` and ``without_labels`` are passed.
ValueError
If the landmark manager doesn't contain the provided group label.
"""
if not self.has_landmarks:
raise ValueError('PointCloud does not have landmarks attached, '
'unable to view landmarks.')
self_view = self.view(figure_id=figure_id, new_figure=new_figure,
image_view=image_view, figure_size=figure_size)
landmark_view = self.landmarks[group].view(
with_labels=with_labels, without_labels=without_labels,
figure_id=self_view.figure_id, new_figure=False,
image_view=image_view, render_lines=render_lines,
line_colour=line_colour, line_style=line_style,
line_width=line_width, render_markers=render_markers,
marker_style=marker_style, marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_legend=render_legend, legend_title=legend_title,
legend_font_name=legend_font_name,
legend_font_style=legend_font_style,
legend_font_size=legend_font_size,
legend_font_weight=legend_font_weight,
legend_marker_scale=legend_marker_scale,
legend_location=legend_location,
legend_bbox_to_anchor=legend_bbox_to_anchor,
legend_border_axes_pad=legend_border_axes_pad,
legend_n_columns=legend_n_columns,
legend_horizontal_spacing=legend_horizontal_spacing,
legend_vertical_spacing=legend_vertical_spacing,
legend_border=legend_border,
legend_border_padding=legend_border_padding,
legend_shadow=legend_shadow,
legend_rounded_corners=legend_rounded_corners,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks, figure_size=figure_size)
return landmark_view
def _view_3d(self, figure_id=None, new_figure=True, render_markers=True,
marker_style='sphere', marker_size=None, marker_colour='r',
marker_resolution=8, step=None, alpha=1.0,
render_numbering=False, numbers_colour='k', numbers_size=None,
**kwargs):
r"""
Visualization of the PointCloud in 3D.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : `str`, optional
The style of the markers.
Example options ::
{2darrow, 2dcircle, 2dcross, 2ddash, 2ddiamond, 2dhooked_arrow,
2dsquare, 2dthick_arrow, 2dthick_cross, 2dtriangle, 2dvertex,
arrow, axes, cone, cube, cylinder, point, sphere}
marker_size : `float` or ``None``, optional
The size of the markers. This size can be seen as a scale factor
            applied to the size of the markers, which is by default calculated from
the inter-marker spacing. If ``None``, then an optimal marker size
value will be set automatically.
marker_colour : See Below, optional
The colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_resolution : `int`, optional
The resolution of the markers. For spheres, for instance, this is
the number of divisions along theta and phi.
step : `int` or ``None``, optional
            If `int`, then only every `step`-th vertex will be rendered.
            If ``None``, then all vertices will be rendered.
alpha : `float`, optional
Defines the transparency (opacity) of the object.
render_numbering : `bool`, optional
If ``True``, the points will be numbered.
numbers_colour : See Below, optional
The colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
numbers_size : `float` or ``None``, optional
The size of the numbers. This size can be seen as a scale factor
applied to the numbers, which is by default calculated from
the inter-marker spacing. If ``None``, then an optimal numbers size
value will be set automatically.
Returns
-------
renderer : `menpo3d.visualize.PointGraphViewer3d`
The Menpo3D rendering object.
"""
try:
from menpo3d.visualize import PointGraphViewer3d
edges = np.empty(0)
renderer = PointGraphViewer3d(figure_id, new_figure,
self.points, edges)
renderer.render(
render_lines=False, render_markers=render_markers,
marker_style=marker_style, marker_size=marker_size,
marker_colour=marker_colour, marker_resolution=marker_resolution,
step=step, alpha=alpha, render_numbering=render_numbering,
numbers_colour=numbers_colour, numbers_size=numbers_size)
return renderer
except ImportError:
from menpo.visualize import Menpo3dMissingError
raise Menpo3dMissingError()
def _view_landmarks_3d(self, group=None, with_labels=None,
without_labels=None, figure_id=None,
new_figure=True, render_lines=True,
line_colour=None, line_width=4, render_markers=True,
marker_style='sphere', marker_size=None,
marker_colour=None, marker_resolution=8,
step=None, alpha=1.0, render_numbering=False,
numbers_colour='k', numbers_size=None):
r"""
Visualization of the PointCloud landmarks in 3D.
Parameters
----------
with_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, only show the given label(s). Should **not** be
used with the ``without_labels`` kwarg.
without_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, show all except the given label(s). Should **not**
be used with the ``with_labels`` kwarg.
group : `str` or `None`, optional
            The landmark group to be visualized. If ``None`` and there is
            more than one landmark group, an error is raised.
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_lines : `bool`, optional
If ``True``, then the lines will be rendered.
line_colour : See Below, optional
The colour of the lines. If ``None``, a different colour will be
automatically selected for each label.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
or
None
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, then the markers will be rendered.
marker_style : `str`, optional
The style of the markers.
Example options ::
{2darrow, 2dcircle, 2dcross, 2ddash, 2ddiamond, 2dhooked_arrow,
2dsquare, 2dthick_arrow, 2dthick_cross, 2dtriangle, 2dvertex,
arrow, axes, cone, cube, cylinder, point, sphere}
marker_size : `float` or ``None``, optional
The size of the markers. This size can be seen as a scale factor
            applied to the size of the markers, which is by default calculated from
the inter-marker spacing. If ``None``, then an optimal marker size
value will be set automatically.
marker_colour : See Below, optional
The colour of the markers. If ``None``, a different colour will be
automatically selected for each label.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
or
None
marker_resolution : `int`, optional
The resolution of the markers. For spheres, for instance, this is
the number of divisions along theta and phi.
step : `int` or ``None``, optional
            If `int`, then only every `step`-th vertex will be rendered.
            If ``None``, then all vertices will be rendered.
alpha : `float`, optional
Defines the transparency (opacity) of the object.
render_numbering : `bool`, optional
If ``True``, the points will be numbered.
numbers_colour : See Below, optional
The colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
numbers_size : `float` or ``None``, optional
The size of the numbers. This size can be seen as a scale factor
applied to the numbers, which is by default calculated from
the inter-marker spacing. If ``None``, then an optimal numbers size
value will be set automatically.
Returns
-------
renderer : `menpo3d.visualize.LandmarkViewer3d`
The Menpo3D rendering object.
"""
if not self.has_landmarks:
raise ValueError('PointCloud does not have landmarks attached, '
'unable to view landmarks.')
self_view = self.view(figure_id=figure_id, new_figure=new_figure)
landmark_view = self.landmarks[group].view(
with_labels=with_labels, without_labels=without_labels,
figure_id=self_view.figure_id, new_figure=False,
render_lines=render_lines, line_colour=line_colour,
line_width=line_width, render_markers=render_markers,
marker_style=marker_style, marker_size=marker_size,
marker_colour=marker_colour, marker_resolution=marker_resolution,
step=step, alpha=alpha, render_numbering=render_numbering,
numbers_colour=numbers_colour, numbers_size=numbers_size)
return landmark_view
def view_widget(self, browser_style='buttons', figure_size=(10, 8),
style='coloured'):
r"""
Visualization of the PointCloud using an interactive widget.
Parameters
----------
browser_style : {``'buttons'``, ``'slider'``}, optional
It defines whether the selector of the objects will have the form of
plus/minus buttons or a slider.
figure_size : (`int`, `int`), optional
The initial size of the rendered figure.
style : {``'coloured'``, ``'minimal'``}, optional
If ``'coloured'``, then the style of the widget will be coloured. If
            ``'minimal'``, then the style is simple, using black and white colours.
"""
try:
from menpowidgets import visualize_pointclouds
visualize_pointclouds(self, figure_size=figure_size, style=style,
browser_style=browser_style)
except ImportError:
from menpo.visualize.base import MenpowidgetsMissingError
raise MenpowidgetsMissingError()
def _transform_self_inplace(self, transform):
self.points = transform(self.points)
return self
def distance_to(self, pointcloud, **kwargs):
r"""
Returns a distance matrix between this PointCloud and another.
By default the Euclidean distance is calculated - see
`scipy.spatial.distance.cdist` for valid kwargs to change the metric
and other properties.
Parameters
----------
pointcloud : :map:`PointCloud`
The second pointcloud to compute distances between. This must be
of the same dimension as this PointCloud.
Returns
-------
        distance_matrix: ``(n_points, other_n_points)`` `ndarray`
            The pairwise distance matrix between the two PointClouds,
            s.t. ``distance_matrix[i, j]`` is the distance between the i'th
            point of this PointCloud and the j'th point of the input
            PointCloud.
"""
if self.n_dims != pointcloud.n_dims:
raise ValueError("The two PointClouds must be of the same "
"dimensionality.")
return cdist(self.points, pointcloud.points, **kwargs)
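    # Illustrative usage sketch -- not part of the original module; the points
    # and the `menpo.shape` import path below are assumptions for the example:
    #
    #   import numpy as np
    #   from menpo.shape import PointCloud
    #   a = PointCloud(np.array([[0.0, 0.0], [3.0, 4.0]]))
    #   b = PointCloud(np.array([[0.0, 0.0]]))
    #   a.distance_to(b)   # array([[0.], [5.]]) -- Euclidean distances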
def norm(self, **kwargs):
r"""
Returns the norm of this PointCloud. This is a translation and
rotation invariant measure of the point cloud's intrinsic size - in
other words, it is always taken around the point cloud's centre.
By default, the Frobenius norm is taken, but this can be changed by
setting kwargs - see ``numpy.linalg.norm`` for valid options.
Returns
-------
norm : `float`
The norm of this :map:`PointCloud`
"""
return np.linalg.norm(self.points - self.centre(), **kwargs)
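    # Worked example (illustrative only): for points [[0, 0], [2, 0]] the
    # centre is (1, 0), the centred points are [[-1, 0], [1, 0]] and the
    # default Frobenius norm is sqrt(1 + 1) = sqrt(2), a value that is
    # unchanged by translating or rotating the original points.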
def from_mask(self, mask):
"""
        Returns a new PointCloud containing only the points selected by a 1D
        boolean mask. The mask must have one entry per point in this
        PointCloud; points whose entry is ``True`` are kept and the rest are
        discarded.
Parameters
----------
mask : ``(n_points,)`` `ndarray`
1D array of booleans
Returns
-------
pointcloud : :map:`PointCloud`
A new pointcloud that has been masked.
Raises
------
ValueError
Mask must have same number of points as pointcloud.
"""
if mask.shape[0] != self.n_points:
raise ValueError('Mask must be a 1D boolean array of the same '
'number of entries as points in this PointCloud.')
pc = self.copy()
pc.points = pc.points[mask, :]
return pc
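    # Illustrative usage sketch -- not part of the original module; the points
    # below are made up for the example:
    #
    #   import numpy as np
    #   from menpo.shape import PointCloud
    #   pc = PointCloud(np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]]))
    #   pc.from_mask(np.array([True, False, True])).n_points   # 2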
def constrain_to_bounds(self, bounds):
r"""
Returns a copy of this PointCloud, constrained to lie exactly within
the given bounds. Any points outside the bounds will be 'snapped'
to lie *exactly* on the boundary.
Parameters
----------
        bounds : `tuple` of two ``(n_dims,)`` scalar sequences
            The ``(lower, upper)`` bounds to constrain this pointcloud within.
Returns
-------
constrained : :map:`PointCloud`
The constrained pointcloud.
"""
pc = self.copy()
for k in range(pc.n_dims):
tmp = pc.points[:, k]
tmp[tmp < bounds[0][k]] = bounds[0][k]
tmp[tmp > bounds[1][k]] = bounds[1][k]
pc.points[:, k] = tmp
return pc
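    # Worked example (illustrative only): with bounds ((0, 0), (1, 1)) a point
    # at (-0.5, 2.0) is snapped onto the boundary at (0.0, 1.0), while points
    # already inside the bounds are returned unchanged.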
| grigorisg9gr/menpo | menpo/shape/pointcloud.py | Python | bsd-3-clause | 48,399 |
from typing import List, Iterable, Type
import gzip
import numpy as np
def get_string_vector_reader(dtype: Type = np.float32, columns: int = None):
"""Get a reader for vectors encoded as whitespace-separated numbers"""
def process_line(line: str, lineno: int, path: str) -> np.ndarray:
numbers = line.strip().split()
if columns is not None and len(numbers) != columns:
raise ValueError("Wrong number of columns ({}) on line {}, file {}"
.format(len(numbers), lineno, path))
return np.array(numbers, dtype=dtype)
    def reader(files: List[str]) -> Iterable[np.ndarray]:
for path in files:
current_line = 0
            if path.endswith(".gz"):
                # open in text mode so each line is str; str() on the raw bytes
                # would yield "b'...'" strings that cannot be parsed as numbers
                with gzip.open(path, 'rt') as f_data:
                    for line in f_data:
                        current_line += 1
                        if line.strip():
                            yield process_line(line, current_line, path)
else:
with open(path) as f_data:
for line in f_data:
current_line += 1
if line.strip():
yield process_line(line, current_line, path)
return reader
# pylint: disable=invalid-name
FloatVectorReader = get_string_vector_reader(np.float32)
IntVectorReader = get_string_vector_reader(np.int32)
# pylint: enable=invalid-name
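# Illustrative usage sketch -- the file name below is hypothetical and not part
# of the original module. Each non-empty line such as "0.1 0.2 0.3" becomes one
# float32 vector; files ending in ".gz" are decompressed transparently.
#
#   for vector in FloatVectorReader(["vectors.txt"]):
#       print(vector.shape)   # e.g. (3,)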
| bastings/neuralmonkey | neuralmonkey/readers/string_vector_reader.py | Python | bsd-3-clause | 1,449 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import djangocms_text_ckeditor.fields
def migrate_club_journal(apps, schema_editor):
import re
ClubJournalEntry = apps.get_model('domecek', 'ClubJournalEntry')
for entry in ClubJournalEntry.objects.all():
entry.agenda = '<p>{}</p>'.format('<br />'.join(entry.agenda.split('\n')))
entry.participants = [reg.participant for reg in entry.registrations.all()]
entry.period = entry.club.periods.get(start__lte=entry.start, end__gte=entry.end)
entry.save()
class Migration(migrations.Migration):
dependencies = [
('domecek', '0011_remove_clubregistration_periods'),
]
operations = [
migrations.AlterField(
model_name='clubjournalentry',
name='agenda',
field=djangocms_text_ckeditor.fields.HTMLField(verbose_name='agenda'),
preserve_default=True,
),
migrations.RenameField(
model_name='clubjournalentry',
old_name='participants',
new_name='registrations',
),
migrations.AddField(
model_name='clubjournalentry',
name='participants',
field=models.ManyToManyField(related_name='journal_entries', verbose_name='participants', to='domecek.Participant', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='clubjournalentry',
name='period',
field=models.ForeignKey(related_name='journal_entries', verbose_name='period', to='domecek.ClubPeriod', null=True),
preserve_default=True,
),
migrations.RunPython(migrate_club_journal),
migrations.AlterField(
model_name='clubjournalentry',
name='period',
field=models.ForeignKey(related_name='journal_entries', verbose_name='period', to='domecek.ClubPeriod'),
preserve_default=True,
),
migrations.RemoveField(
model_name='clubjournalentry',
name='club',
),
migrations.RemoveField(
model_name='clubjournalentry',
name='registrations',
),
]
| misli/django-domecek | domecek/migrations/0012_new_clubjournalentry.py | Python | bsd-3-clause | 2,269 |
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse
from .models import Story
def index(request):
stories = Story.objects.order_by('score')
return render(request, 'reader/stories.html', {'stories': stories})
| n3bukadn3zar/reservo | reservo/reader/views.py | Python | bsd-3-clause | 263 |
__version__ = '0.1dev'
import argparse
import string
import re
from rap.processing_unit import ProcessingUnit
from rap.program import Program, ProgramError
input_pair_regex = re.compile(r"^\s*([a-zA-Z0-9]+)\s*:\s*([0-9]+)\s*$")
def parse_input(string, sep=',', pair_regex=input_pair_regex):
registers = {}
for pair in string.split(','):
if not pair.strip():
continue
match = pair_regex.match(pair)
if not match:
            raise ValueError("invalid register assignment: {!r}".format(pair))
register, value = match.groups()
registers[register] = int(value)
return registers
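# Illustrative examples (not part of the original module): parse_input turns a
# comma-separated assignment string into a register dictionary.
#
#   parse_input("a: 1, b: 2")   # {'a': 1, 'b': 2}
#   parse_input("")             # {}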
class Formatter(string.Formatter):
"""Slightly modified string.Formatter.
The differences are:
- field names are considered strings (i.e. only kwargs are used)
- field names / attributes / items that are not found are silently
ignored and their corresponding replacement fields are preserved
    - invalid replacement fields are also silently preserved
"""
def get_field(self, field_name, args, kwargs):
first, rest = string._string.formatter_field_name_split(field_name)
obj = self.get_value(str(first), args, kwargs)
for is_attr, i in rest:
if is_attr:
obj = getattr(obj, i)
else:
obj = obj[str(i)]
return obj, first
def _vformat(self, format_string, args, kwargs, used_args, recursion_depth):
if recursion_depth < 0:
raise ValueError('Max string recursion exceeded')
result = []
for literal_text, field_name, format_spec, conversion in \
self.parse(format_string):
original_format_spec = format_spec
if literal_text:
result.append(literal_text)
if field_name is not None:
used_args_copy = used_args.copy()
try:
obj, arg_used = self.get_field(field_name, args, kwargs)
used_args_copy.add(arg_used)
obj = self.convert_field(obj, conversion)
format_spec = self._vformat(format_spec, args, kwargs,
used_args_copy, recursion_depth-1)
formatted = self.format_field(obj, format_spec)
result.append(formatted)
used_args.update(used_args_copy)
except (AttributeError, KeyError, ValueError):
result.append("{" + field_name)
if conversion:
result.append("!" + conversion)
if original_format_spec:
result.append(":" + original_format_spec)
result.append("}")
return ''.join(result)
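# Illustrative behaviour sketch (not part of the original module): formatting
# "a: {a}, b: {b}" with only {"a": 1} available leaves the unknown field as the
# literal text "{b}" instead of raising KeyError, so the result is
# "a: 1, b: {b}". This assumes a Python version whose string.Formatter.vformat
# accepts a plain string return value from _vformat, as this override provides.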
def make_parser():
parser = argparse.ArgumentParser('rap',
description="Register Assembly Programming")
parser.add_argument('file', type=argparse.FileType('r'),
help="a file containing a RAP program")
parser.add_argument('-i', '--input', metavar='input', type=parse_input,
help="set the initial register values (e.g. \"a: 1, b: 2\")")
parser.add_argument('-o', '--output', metavar='format', nargs='?',
const=True, help="""
print the register values after the program ends; if format is
given, register names between braces will be replaced with
their values (e.g. "a: {a}")
""")
parser.add_argument('-t', '--trace', metavar='format', nargs='?',
const=True, help="""
print the register values before every executed instruction;
behaves like --output
""")
parser.add_argument('-s', '--start', metavar='step', type=int,
help="start from instruction step instead of the beginning")
parser.add_argument('-c', '--check', action='store_true',
help="only check the syntax (don't execute the program)")
return parser
def make_printer(what):
if what is None:
return None
if what is True:
return lambda pu: print(pu.registers)
formatter = Formatter()
def printer(pu):
names = dict(pu.registers)
print(formatter.vformat(what, (), names))
return printer
def main(args=None):
parser = make_parser()
args = parser.parse_args(args)
# TODO: validate args.output and args.trace
# TODO: decode character escapes for args.output and args.trace
try:
with args.file as f:
program = Program.load(f)
for error in program.check():
raise error
except ProgramError as e:
parser.error("{} (on line {})".format(e.message, e.line_no))
if args.check:
parser.exit(message=str(program))
if args.start is None:
start = program.start
elif args.start in program:
start = args.start
else:
parser.error("step {} not in program".format(args.start))
trace = make_printer(args.trace)
output = make_printer(args.output)
pu = ProcessingUnit()
    if args.input:
        # -i/--input is optional; only seed registers when values were given
        pu.registers.update(args.input)
pu.run_program(program, start, trace)
if output:
output(pu)
| lemon24/rap | rap/__init__.py | Python | bsd-3-clause | 5,176 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pygesture/ui/templates/process_widget_template.ui'
#
# Created by: PyQt5 UI code generator 5.4.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ProcessWidget(object):
def setupUi(self, ProcessWidget):
ProcessWidget.setObjectName("ProcessWidget")
ProcessWidget.resize(823, 539)
self.gridLayout_2 = QtWidgets.QGridLayout(ProcessWidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.titleLabel = QtWidgets.QLabel(ProcessWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.titleLabel.sizePolicy().hasHeightForWidth())
self.titleLabel.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.titleLabel.setFont(font)
self.titleLabel.setText("")
self.titleLabel.setAlignment(QtCore.Qt.AlignCenter)
self.titleLabel.setObjectName("titleLabel")
self.verticalLayout.addWidget(self.titleLabel)
self.gridLayout_2.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.groupBox = QtWidgets.QGroupBox(ProcessWidget)
self.groupBox.setMinimumSize(QtCore.QSize(250, 0))
self.groupBox.setObjectName("groupBox")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout.setObjectName("gridLayout")
self.sessionBrowser = SessionBrowser(self.groupBox)
self.sessionBrowser.setObjectName("sessionBrowser")
self.gridLayout.addWidget(self.sessionBrowser, 0, 0, 1, 1)
self.processButton = QtWidgets.QPushButton(self.groupBox)
self.processButton.setEnabled(False)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.processButton.sizePolicy().hasHeightForWidth())
self.processButton.setSizePolicy(sizePolicy)
self.processButton.setObjectName("processButton")
self.gridLayout.addWidget(self.processButton, 1, 0, 1, 1)
self.progressBar = QtWidgets.QProgressBar(self.groupBox)
self.progressBar.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.progressBar.sizePolicy().hasHeightForWidth())
self.progressBar.setSizePolicy(sizePolicy)
self.progressBar.setMaximum(1)
self.progressBar.setProperty("value", 1)
self.progressBar.setTextVisible(True)
self.progressBar.setOrientation(QtCore.Qt.Horizontal)
self.progressBar.setInvertedAppearance(False)
self.progressBar.setFormat("")
self.progressBar.setObjectName("progressBar")
self.gridLayout.addWidget(self.progressBar, 2, 0, 1, 1)
self.gridLayout_2.addWidget(self.groupBox, 0, 1, 1, 1)
self.retranslateUi(ProcessWidget)
QtCore.QMetaObject.connectSlotsByName(ProcessWidget)
def retranslateUi(self, ProcessWidget):
_translate = QtCore.QCoreApplication.translate
ProcessWidget.setWindowTitle(_translate("ProcessWidget", "Form"))
self.groupBox.setTitle(_translate("ProcessWidget", "Sessions"))
self.processButton.setText(_translate("ProcessWidget", "Process"))
from pygesture.ui.widgets import SessionBrowser
| ixjlyons/pygesture | pygesture/ui/templates/process_widget_template.py | Python | bsd-3-clause | 3,894 |
# -*- coding: utf-8 -*-
import feedparser
from django.db import models
from django.utils.translation import ugettext_lazy as _
from leonardo.module.web.models import ContentProxyWidgetMixin
from leonardo.module.web.models import Widget
from leonardo.module.web.widgets.mixins import JSONContentMixin
from leonardo.module.web.widgets.mixins import ListWidgetMixin
class FeedReaderWidget(Widget, JSONContentMixin, ContentProxyWidgetMixin,
ListWidgetMixin):
max_items = models.IntegerField(_('max. items'), default=5)
class Meta:
abstract = True
verbose_name = _("feed reader")
verbose_name_plural = _('feed readers')
def update_cache_data(self, save=True):
pass
def get_data(self):
feed = feedparser.parse(self.source_address)
entries = feed['entries'][:self.max_items]
return entries
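    # Illustrative sketch -- not part of the original module. ``source_address``
    # is presumed to be supplied by ContentProxyWidgetMixin and to point at an
    # RSS/Atom feed; feedparser then exposes each entry as a dict-like object:
    #
    #   entries = feedparser.parse("https://example.com/feed.xml")["entries"]
    #   titles = [entry["title"] for entry in entries[:5]]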
| django-leonardo/django-leonardo | leonardo/module/web/widget/feedreader/models.py | Python | bsd-3-clause | 888 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.utils.iers package provides access to the tables provided by
the International Earth Rotation and Reference Systems Service, in
particular allowing interpolation of published UT1-UTC values for given
times. These are used in `astropy.time` to provide UT1 values. The polar
motions are also used for determining earth orientation for
celestial-to-terrestrial coordinate transformations
(in `astropy.coordinates`).
"""
from warnings import warn
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import numpy as np
from astropy import config as _config
from astropy import units as u
from astropy.table import Table, QTable
from astropy.utils.data import get_pkg_data_filename, clear_download_cache
from astropy import utils
from astropy.utils.exceptions import AstropyWarning
__all__ = ['Conf', 'conf',
'IERS', 'IERS_B', 'IERS_A', 'IERS_Auto',
'FROM_IERS_B', 'FROM_IERS_A', 'FROM_IERS_A_PREDICTION',
'TIME_BEFORE_IERS_RANGE', 'TIME_BEYOND_IERS_RANGE',
'IERS_A_FILE', 'IERS_A_URL', 'IERS_A_URL_MIRROR', 'IERS_A_README',
'IERS_B_FILE', 'IERS_B_URL', 'IERS_B_README',
'IERSRangeError', 'IERSStaleWarning']
# IERS-A default file name, URL, and ReadMe with content description
IERS_A_FILE = 'finals2000A.all'
IERS_A_URL = 'https://maia.usno.navy.mil/ser7/finals2000A.all'
IERS_A_URL_MIRROR = 'https://toshi.nofs.navy.mil/ser7/finals2000A.all'
IERS_A_README = get_pkg_data_filename('data/ReadMe.finals2000A')
# IERS-B default file name, URL, and ReadMe with content description
IERS_B_FILE = get_pkg_data_filename('data/eopc04_IAU2000.62-now')
IERS_B_URL = 'http://hpiers.obspm.fr/iers/eop/eopc04/eopc04_IAU2000.62-now'
IERS_B_README = get_pkg_data_filename('data/ReadMe.eopc04_IAU2000')
# Status/source values returned by IERS.ut1_utc
FROM_IERS_B = 0
FROM_IERS_A = 1
FROM_IERS_A_PREDICTION = 2
TIME_BEFORE_IERS_RANGE = -1
TIME_BEYOND_IERS_RANGE = -2
MJD_ZERO = 2400000.5
INTERPOLATE_ERROR = """\
interpolating from IERS_Auto using predictive values that are more
than {0} days old.
Normally you should not see this error because this class
automatically downloads the latest IERS-A table. Perhaps you are
offline? If you understand what you are doing then this error can be
suppressed by setting the auto_max_age configuration variable to
``None``:
from astropy.utils.iers import conf
conf.auto_max_age = None
"""
def download_file(*args, **kwargs):
"""
Overload astropy.utils.data.download_file within iers module to use a
custom (longer) wait time. This just passes through ``*args`` and
``**kwargs`` after temporarily setting the download_file remote timeout to
the local ``iers.conf.remote_timeout`` value.
"""
with utils.data.conf.set_temp('remote_timeout', conf.remote_timeout):
return utils.data.download_file(*args, **kwargs)
class IERSStaleWarning(AstropyWarning):
pass
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.iers`.
"""
auto_download = _config.ConfigItem(
True,
'Enable auto-downloading of the latest IERS data. If set to False '
'then the local IERS-B file will be used by default. Default is True.')
auto_max_age = _config.ConfigItem(
30.0,
'Maximum age (days) of predictive data before auto-downloading. Default is 30.')
iers_auto_url = _config.ConfigItem(
IERS_A_URL,
'URL for auto-downloading IERS file data.')
iers_auto_url_mirror = _config.ConfigItem(
IERS_A_URL_MIRROR,
'Mirror URL for auto-downloading IERS file data.')
remote_timeout = _config.ConfigItem(
10.0,
'Remote timeout downloading IERS file data (seconds).')
conf = Conf()
class IERSRangeError(IndexError):
"""
Any error for when dates are outside of the valid range for IERS
"""
class IERS(QTable):
"""Generic IERS table class, defining interpolation functions.
Sub-classed from `astropy.table.QTable`. The table should hold columns
'MJD', 'UT1_UTC', 'dX_2000A'/'dY_2000A', and 'PM_x'/'PM_y'.
"""
iers_table = None
@classmethod
def open(cls, file=None, cache=False, **kwargs):
"""Open an IERS table, reading it from a file if not loaded before.
Parameters
----------
file : str or None
full local or network path to the ascii file holding IERS data,
for passing on to the ``read`` class methods (further optional
arguments that are available for some IERS subclasses can be added).
If None, use the default location from the ``read`` class method.
cache : bool
Whether to use cache. Defaults to False, since IERS files
are regularly updated.
Returns
-------
An IERS table class instance
Notes
-----
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table if ``file=None`` (the default).
If a table needs to be re-read from disk, pass on an explicit file
location or use the (sub-class) close method and re-open.
If the location is a network location it is first downloaded via
download_file.
For the IERS class itself, an IERS_B sub-class instance is opened.
"""
if file is not None or cls.iers_table is None:
if file is not None:
if urlparse(file).netloc:
kwargs.update(file=download_file(file, cache=cache))
else:
kwargs.update(file=file)
cls.iers_table = cls.read(**kwargs)
return cls.iers_table
@classmethod
def close(cls):
"""Remove the IERS table from the class.
This allows the table to be re-read from disk during one's session
(e.g., if one finds it is out of date and has updated the file).
"""
cls.iers_table = None
def mjd_utc(self, jd1, jd2=0.):
"""Turn a time to MJD, returning integer and fractional parts.
Parameters
----------
jd1 : float, array, or Time
first part of two-part JD, or Time object
jd2 : float or array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
Returns
-------
mjd : float or array
integer part of MJD
utc : float or array
fractional part of MJD
"""
try: # see if this is a Time object
jd1, jd2 = jd1.utc.jd1, jd1.utc.jd2
except Exception:
pass
mjd = np.floor(jd1 - MJD_ZERO + jd2)
utc = jd1 - (MJD_ZERO+mjd) + jd2
return mjd, utc
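    # Worked example: for jd1 = 2457000.5 and jd2 = 0.25 the two-part JD is
    # 2457000.75, so mjd = floor(2457000.75 - 2400000.5) = 57000.0 and
    # utc = 0.25, i.e. a quarter of a day past MJD 57000.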
def ut1_utc(self, jd1, jd2=0., return_status=False):
"""Interpolate UT1-UTC corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, float array, or Time object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['UT1_UTC'],
self.ut1_utc_source if return_status else None)
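    # Illustrative usage sketch -- not part of the original module; the date is
    # arbitrary but should lie within the range of the loaded table:
    #
    #   from astropy.time import Time
    #   iers_b = IERS_B.open()                     # bundled IERS-B table
    #   dut1 = iers_b.ut1_utc(Time('2014-01-01'))  # UT1-UTC as a Quantity
    #
    # With ``return_status=True`` the FROM_IERS_* / TIME_*_IERS_RANGE codes
    # defined at the top of this module are returned as well.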
def dcip_xy(self, jd1, jd2=0., return_status=False):
"""Interpolate CIP corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, float array, or Time object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD (default 0., ignored if jd1 is Time)
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
D_x : Quantity with angle units
x component of CIP correction for the requested times
D_y : Quantity with angle units
y component of CIP correction for the requested times
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['dX_2000A', 'dY_2000A'],
self.dcip_source if return_status else None)
def pm_xy(self, jd1, jd2=0., return_status=False):
"""Interpolate polar motions from IERS Table for given dates.
Parameters
----------
jd1 : float, float array, or Time object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
PM_x : Quantity with angle units
x component of polar motion for the requested times
PM_y : Quantity with angle units
y component of polar motion for the requested times
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['PM_x', 'PM_y'],
self.pm_source if return_status else None)
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""
Check that the indices from interpolation match those after clipping
to the valid table range. This method gets overridden in the IERS_Auto
class because it has different requirements.
"""
if np.any(indices_orig != indices_clipped):
raise IERSRangeError('(some) times are outside of range covered '
'by IERS table.')
def _interpolate(self, jd1, jd2, columns, source=None):
mjd, utc = self.mjd_utc(jd1, jd2)
# enforce array
is_scalar = not hasattr(mjd, '__array__') or mjd.ndim == 0
if is_scalar:
mjd = np.array([mjd])
utc = np.array([utc])
self._refresh_table_as_needed(mjd)
# For typical format, will always find a match (since MJD are integer)
        # hence, it is important to define which side we will be on; this ensures
# self['MJD'][i-1]<=mjd<self['MJD'][i]
i = np.searchsorted(self['MJD'].value, mjd, side='right')
# Get index to MJD at or just below given mjd, clipping to ensure we
# stay in range of table (status will be set below for those outside)
i1 = np.clip(i, 1, len(self) - 1)
i0 = i1 - 1
mjd_0, mjd_1 = self['MJD'][i0].value, self['MJD'][i1].value
results = []
for column in columns:
val_0, val_1 = self[column][i0], self[column][i1]
d_val = val_1 - val_0
if column == 'UT1_UTC':
# Check & correct for possible leap second (correcting diff.,
# not 1st point, since jump can only happen right at 2nd point)
d_val -= d_val.round()
# Linearly interpolate (which is what TEMPO does for UT1-UTC, but
# may want to follow IERS gazette #13 for more precise
# interpolation and correction for tidal effects;
# http://maia.usno.navy.mil/iers-gaz13)
val = val_0 + (mjd - mjd_0 + utc) / (mjd_1 - mjd_0) * d_val
# Do not extrapolate outside range, instead just propagate last values.
val[i == 0] = self[column][0]
val[i == len(self)] = self[column][-1]
if is_scalar:
val = val[0]
results.append(val)
if source:
# Set status to source, using the routine passed in.
status = source(i1)
# Check for out of range
status[i == 0] = TIME_BEFORE_IERS_RANGE
status[i == len(self)] = TIME_BEYOND_IERS_RANGE
if is_scalar:
status = status[0]
results.append(status)
return results
else:
self._check_interpolate_indices(i1, i, np.max(mjd))
return results[0] if len(results) == 1 else results
def _refresh_table_as_needed(self, mjd):
"""
Potentially update the IERS table in place depending on the requested
        time values in ``mjd`` and the time span of the table. The base behavior
is not to update the table. ``IERS_Auto`` overrides this method.
"""
pass
def ut1_utc_source(self, i):
"""Source for UT1-UTC. To be overridden by subclass."""
return np.zeros_like(i)
def dcip_source(self, i):
"""Source for CIP correction. To be overridden by subclass."""
return np.zeros_like(i)
def pm_source(self, i):
"""Source for polar motion. To be overridden by subclass."""
return np.zeros_like(i)
@property
def time_now(self):
"""
Property to provide the current time, but also allow for explicitly setting
the _time_now attribute for testing purposes.
"""
from astropy.time import Time
try:
return self._time_now
except Exception:
return Time.now()
class IERS_A(IERS):
"""IERS Table class targeted to IERS A, provided by USNO.
These include rapid turnaround and predicted times.
See http://maia.usno.navy.mil/
Notes
-----
The IERS A file is not part of astropy. It can be downloaded from
``iers.IERS_A_URL`` or ``iers.IERS_A_URL_MIRROR``. See ``iers.__doc__``
for instructions on use in ``Time``, etc.
"""
iers_table = None
@classmethod
def _combine_a_b_columns(cls, iers_a):
"""
Return a new table with appropriate combination of IERS_A and B columns.
"""
# IERS A has some rows at the end that hold nothing but dates & MJD
# presumably to be filled later. Exclude those a priori -- there
# should at least be a predicted UT1-UTC and PM!
table = iers_a[~iers_a['UT1_UTC_A'].mask &
~iers_a['PolPMFlag_A'].mask]
# This does nothing for IERS_A, but allows IERS_Auto to ensure the
# IERS B values in the table are consistent with the true ones.
table = cls._substitute_iers_b(table)
# Run np.where on the data from the table columns, since in numpy 1.9
# it otherwise returns an only partially initialized column.
table['UT1_UTC'] = np.where(table['UT1_UTC_B'].mask,
table['UT1_UTC_A'].data,
table['UT1_UTC_B'].data)
# Ensure the unit is correct, for later column conversion to Quantity.
table['UT1_UTC'].unit = table['UT1_UTC_A'].unit
table['UT1Flag'] = np.where(table['UT1_UTC_B'].mask,
table['UT1Flag_A'].data,
'B')
# Repeat for polar motions.
table['PM_x'] = np.where(table['PM_X_B'].mask,
table['PM_x_A'].data,
table['PM_X_B'].data)
table['PM_x'].unit = table['PM_x_A'].unit
table['PM_y'] = np.where(table['PM_Y_B'].mask,
table['PM_y_A'].data,
table['PM_Y_B'].data)
table['PM_y'].unit = table['PM_y_A'].unit
table['PolPMFlag'] = np.where(table['PM_X_B'].mask,
table['PolPMFlag_A'].data,
'B')
table['dX_2000A'] = np.where(table['dX_2000A_B'].mask,
table['dX_2000A_A'].data,
table['dX_2000A_B'].data)
table['dX_2000A'].unit = table['dX_2000A_A'].unit
table['dY_2000A'] = np.where(table['dY_2000A_B'].mask,
table['dY_2000A_A'].data,
table['dY_2000A_B'].data)
table['dY_2000A'].unit = table['dY_2000A_A'].unit
table['NutFlag'] = np.where(table['dX_2000A_B'].mask,
table['NutFlag_A'].data,
'B')
# Get the table index for the first row that has predictive values
# PolPMFlag_A IERS (I) or Prediction (P) flag for
# Bull. A polar motion values
# UT1Flag_A IERS (I) or Prediction (P) flag for
# Bull. A UT1-UTC values
is_predictive = (table['UT1Flag_A'] == 'P') | (table['PolPMFlag_A'] == 'P')
table.meta['predictive_index'] = np.min(np.flatnonzero(is_predictive))
table.meta['predictive_mjd'] = table['MJD'][table.meta['predictive_index']]
return table
@classmethod
def _substitute_iers_b(cls, table):
# See documentation in IERS_Auto.
return table
@classmethod
def read(cls, file=None, readme=None):
"""Read IERS-A table from a finals2000a.* file provided by USNO.
Parameters
----------
file : str
full path to ascii file holding IERS-A data.
Defaults to ``iers.IERS_A_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_A_README``.
Returns
-------
``IERS_A`` class instance
"""
if file is None:
file = IERS_A_FILE
if readme is None:
readme = IERS_A_README
# Read in as a regular Table, including possible masked columns.
# Columns will be filled and converted to Quantity in cls.__init__.
iers_a = Table.read(file, format='cds', readme=readme)
iers_a = Table(iers_a, masked=True, copy=False)
# Combine the A and B data for UT1-UTC and PM columns
table = cls._combine_a_b_columns(iers_a)
table.meta['data_path'] = file
table.meta['readme_path'] = readme
# Fill any masked values, and convert to a QTable.
return cls(table.filled())
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table"""
ut1flag = self['UT1Flag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[ut1flag == 'I'] = FROM_IERS_A
source[ut1flag == 'P'] = FROM_IERS_A_PREDICTION
return source
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table"""
nutflag = self['NutFlag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[nutflag == 'I'] = FROM_IERS_A
source[nutflag == 'P'] = FROM_IERS_A_PREDICTION
return source
def pm_source(self, i):
"""Set polar motion source flag for entries in IERS table"""
pmflag = self['PolPMFlag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[pmflag == 'I'] = FROM_IERS_A
source[pmflag == 'P'] = FROM_IERS_A_PREDICTION
return source
class IERS_B(IERS):
"""IERS Table class targeted to IERS B, provided by IERS itself.
These are final values; see http://www.iers.org/
Notes
-----
If the package IERS B file (```iers.IERS_B_FILE``) is out of date, a new
version can be downloaded from ``iers.IERS_B_URL``.
"""
iers_table = None
@classmethod
def read(cls, file=None, readme=None, data_start=14):
"""Read IERS-B table from a eopc04_iau2000.* file provided by IERS.
Parameters
----------
file : str
full path to ascii file holding IERS-B data.
Defaults to package version, ``iers.IERS_B_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_B_README``.
data_start : int
starting row. Default is 14, appropriate for standard IERS files.
Returns
-------
``IERS_B`` class instance
"""
if file is None:
file = IERS_B_FILE
if readme is None:
readme = IERS_B_README
# Read in as a regular Table, including possible masked columns.
# Columns will be filled and converted to Quantity in cls.__init__.
iers_b = Table.read(file, format='cds', readme=readme,
data_start=data_start)
return cls(iers_b.filled())
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
def pm_source(self, i):
"""Set PM source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
class IERS_Auto(IERS_A):
"""
Provide most-recent IERS data and automatically handle downloading
of updated values as necessary.
"""
iers_table = None
@classmethod
def open(cls):
"""If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to True (default), then open a recent version of the IERS-A
table with predictions for UT1-UTC and polar motion out to
approximately one year from now. If the available version of this file
        is more than ``astropy.utils.iers.conf.auto_max_age`` days old
(or non-existent) then it will be downloaded over the network and cached.
If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to False then ``astropy.utils.iers.IERS()`` is returned. This
is normally the IERS-B table that is supplied with astropy.
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table.
Returns
-------
`~astropy.table.QTable` instance with IERS (Earth rotation) data columns
"""
if not conf.auto_download:
cls.iers_table = IERS.open()
return cls.iers_table
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
if cls.iers_table is not None:
# If the URL has changed, we need to redownload the file, so we
# should ignore the internally cached version.
if cls.iers_table.meta.get('data_url') in all_urls:
return cls.iers_table
dl_success = False
err_list = []
for url in all_urls:
try:
filename = download_file(url, cache=True)
except Exception as err:
err_list.append(str(err))
else:
dl_success = True
break
if not dl_success:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream when actually trying to interpolate
# predictive values.
warn(AstropyWarning('failed to download {}, using local IERS-B: {}'
.format(' and '.join(all_urls),
';'.join(err_list)))) # noqa
cls.iers_table = IERS.open()
return cls.iers_table
cls.iers_table = cls.read(file=filename)
cls.iers_table.meta['data_url'] = str(url)
return cls.iers_table
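    # Illustrative usage sketch (not part of the original module):
    #
    #   iers_table = IERS_Auto.open()   # downloads/caches IERS-A when allowed
    #
    # With ``conf.auto_download = False`` this simply falls back to the bundled
    # IERS-B table, as described in the docstring above.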
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""Check that the indices from interpolation match those after clipping to the
valid table range. The IERS_Auto class is exempted as long as it has
sufficiently recent available data so the clipped interpolation is
always within the confidence bounds of current Earth rotation
knowledge.
"""
predictive_mjd = self.meta['predictive_mjd']
# See explanation in _refresh_table_as_needed for these conditions
auto_max_age = (conf.auto_max_age if conf.auto_max_age is not None
else np.finfo(float).max)
if (max_input_mjd > predictive_mjd and
self.time_now.mjd - predictive_mjd > auto_max_age):
raise ValueError(INTERPOLATE_ERROR.format(auto_max_age))
def _refresh_table_as_needed(self, mjd):
"""Potentially update the IERS table in place depending on the requested
time values in ``mjd`` and the time span of the table.
For IERS_Auto the behavior is that the table is refreshed from the IERS
server if both the following apply:
- Any of the requested IERS values are predictive. The IERS-A table
contains predictive data out for a year after the available
definitive values.
        - The first predictive values are at least ``conf.auto_max_age`` days old.
In other words the IERS-A table was created by IERS long enough
ago that it can be considered stale for predictions.
"""
max_input_mjd = np.max(mjd)
now_mjd = self.time_now.mjd
# IERS-A table contains predictive data out for a year after
# the available definitive values.
fpi = self.meta['predictive_index']
predictive_mjd = self.meta['predictive_mjd']
# Update table in place if necessary
auto_max_age = (conf.auto_max_age if conf.auto_max_age is not None
else np.finfo(float).max)
# If auto_max_age is smaller than IERS update time then repeated downloads may
        # occur without getting updated values (giving an IERSStaleWarning).
if auto_max_age < 10:
raise ValueError('IERS auto_max_age configuration value must be larger than 10 days')
if (max_input_mjd > predictive_mjd and
now_mjd - predictive_mjd > auto_max_age):
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
dl_success = False
err_list = []
# Get the latest version
for url in all_urls:
try:
clear_download_cache(url)
filename = download_file(url, cache=True)
except Exception as err:
err_list.append(str(err))
else:
dl_success = True
break
if not dl_success:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream when actually trying to interpolate
# predictive values.
warn(AstropyWarning('failed to download {}: {}.\nA coordinate or time-related '
'calculation might be compromised or fail because the dates are '
'not covered by the available IERS file. See the '
'"IERS data access" section of the astropy documentation '
'for additional information on working offline.'
.format(' and '.join(all_urls), ';'.join(err_list))))
return
new_table = self.__class__.read(file=filename)
new_table.meta['data_url'] = str(url)
# New table has new values?
if new_table['MJD'][-1] > self['MJD'][-1]:
                # Replace current values from the first predictive index through
# the end of the current table. This replacement is much faster than just
# deleting all rows and then using add_row for the whole duration.
new_fpi = np.searchsorted(new_table['MJD'].value, predictive_mjd, side='right')
n_replace = len(self) - fpi
self[fpi:] = new_table[new_fpi:new_fpi + n_replace]
# Sanity check for continuity
if new_table['MJD'][new_fpi + n_replace] - self['MJD'][-1] != 1.0 * u.d:
raise ValueError('unexpected gap in MJD when refreshing IERS table')
# Now add new rows in place
for row in new_table[new_fpi + n_replace:]:
self.add_row(row)
self.meta.update(new_table.meta)
else:
warn(IERSStaleWarning(
'IERS_Auto predictive values are older than {} days but downloading '
'the latest table did not find newer values'.format(conf.auto_max_age)))
@classmethod
def _substitute_iers_b(cls, table):
"""Substitute IERS B values with those from a real IERS B table.
IERS-A has IERS-B values included, but for reasons unknown these
do not match the latest IERS-B values (see comments in #4436).
Here, we use the bundled astropy IERS-B table to overwrite the values
in the downloaded IERS-A table.
"""
iers_b = IERS_B.open()
# Substitute IERS-B values for existing B values in IERS-A table
mjd_b = table['MJD'][~table['UT1_UTC_B'].mask]
i0 = np.searchsorted(iers_b['MJD'].value, mjd_b[0], side='left')
i1 = np.searchsorted(iers_b['MJD'].value, mjd_b[-1], side='right')
iers_b = iers_b[i0:i1]
n_iers_b = len(iers_b)
# If there is overlap then replace IERS-A values from available IERS-B
if n_iers_b > 0:
# Sanity check that we are overwriting the correct values
if not np.allclose(table['MJD'][:n_iers_b], iers_b['MJD'].value):
raise ValueError('unexpected mismatch when copying '
'IERS-B values into IERS-A table.')
# Finally do the overwrite
table['UT1_UTC_B'][:n_iers_b] = iers_b['UT1_UTC'].value
table['PM_X_B'][:n_iers_b] = iers_b['PM_x'].value
table['PM_Y_B'][:n_iers_b] = iers_b['PM_y'].value
return table
# by default for IERS class, read IERS-B table
IERS.read = IERS_B.read
| bsipocz/astropy | astropy/utils/iers/iers.py | Python | bsd-3-clause | 31,536 |
from helper import CompatTestCase
from validator.compat import FX22_DEFINITION
class TestFX22Compat(CompatTestCase):
"""Test that compatibility tests for Gecko 22 are properly executed."""
VERSION = FX22_DEFINITION
def test_nsigh2(self):
self.run_regex_for_compat("nsIGlobalHistory2", is_js=True)
self.assert_silent()
self.assert_compat_error(type_="warning")
def test_nsilms(self):
self.run_regex_for_compat("nsILivemarkService", is_js=True)
self.assert_silent()
self.assert_compat_error(type_="warning")
def test_mpat(self):
self.run_regex_for_compat("markPageAsTyped", is_js=True)
self.assert_silent()
self.assert_compat_error(type_="warning")
def test_favicon(self):
self.run_regex_for_compat("setFaviconUrlForPage", is_js=True)
self.assert_silent()
self.assert_compat_error(type_="warning")
def test_nsitv(self):
self.run_regex_for_compat("getRowProperties", is_js=True)
self.assert_silent()
self.assert_compat_error(type_="warning")
def test_nsipb(self):
self.run_regex_for_compat("nsIPrivateBrowsingService", is_js=True)
self.assert_silent()
self.assert_compat_error(type_="warning")
def test_fullZoom(self):
self.run_regex_for_compat("fullZoom", is_js=True)
self.assert_silent()
self.assert_compat_error()
def test_userdata(self):
self.run_regex_for_compat("getUserData", is_js=True)
self.assert_silent()
self.assert_compat_warning()
def test_tooltip(self):
self.run_regex_for_compat("FillInHTMLTooltip", is_js=True)
self.assert_silent()
self.assert_compat_warning()
| mattbasta/amo-validator | tests/compat/test_gecko22.py | Python | bsd-3-clause | 1,747 |
from pylons import tmpl_context as c
from pylons import app_globals as g
from pylons.i18n import _
from r2.config import feature
from r2.controllers import add_controller
from r2.controllers.reddit_base import RedditController
from r2.lib.errors import errors
from r2.lib.require import require, RequirementException
from r2.lib.validator import (
json_validate,
validate,
validatedForm,
VBoolean,
VExistingUname,
VGold,
VJSON,
VModhash,
VUser,
)
from reddit_gold.models import SnoovatarsByAccount
from reddit_gold.pages import (
GoldInfoPage,
Snoovatar,
SnoovatarProfilePage,
)
from reddit_gold.validators import VSnooColor
@add_controller
class GoldController(RedditController):
def GET_about(self):
return GoldInfoPage(
_("gold"),
show_sidebar=False,
page_classes=["gold-page-ga-tracking"]
).render()
def GET_partners(self):
self.redirect("/gold/about", code=301)
@validate(
vuser=VExistingUname("username"),
)
def GET_snoovatar(self, vuser):
if not vuser or vuser._deleted or not vuser.gold:
self.abort404()
snoovatar = SnoovatarsByAccount.load(vuser, "snoo")
user_is_owner = c.user_is_loggedin and c.user == vuser
if not user_is_owner:
if not snoovatar or not snoovatar["public"]:
self.abort404()
return SnoovatarProfilePage(
user=vuser,
content=Snoovatar(
editable=user_is_owner,
snoovatar=snoovatar,
username=vuser.name,
),
).render()
@add_controller
class GoldApiController(RedditController):
@validatedForm(
VUser(),
VGold(),
VModhash(),
public=VBoolean("public"),
snoo_color=VSnooColor("snoo_color"),
unvalidated_components=VJSON("components"),
)
def POST_snoovatar(self, form, jquery, public, snoo_color, unvalidated_components):
if form.has_errors("components",
errors.NO_TEXT,
errors.TOO_LONG,
errors.BAD_STRING,
):
return
if form.has_errors("snoo_color", errors.BAD_CSS_COLOR):
return
try:
tailors = g.plugins["gold"].tailors_data
validated = {}
for tailor in tailors:
tailor_name = tailor["name"]
component = unvalidated_components.get(tailor_name)
# if the tailor requires a selection, ensure there is one
if not tailor["allow_clear"]:
require(component)
# ensure this dressing exists
dressing = component.get("dressingName")
if dressing:
for d in tailor["dressings"]:
if dressing == d["name"]:
break
else:
raise RequirementException
validated[tailor_name] = component
except RequirementException:
c.errors.add(errors.INVALID_SNOOVATAR, field="components")
form.has_errors("components", errors.INVALID_SNOOVATAR)
return
SnoovatarsByAccount.save(
user=c.user,
name="snoo",
public=public,
snoo_color=snoo_color,
components=validated,
)
| madbook/reddit-plugin-gold | reddit_gold/controllers.py | Python | bsd-3-clause | 3,510 |
from django.http import HttpResponse
from error_capture_middleware import ErrorCaptureHandler
class PlainExceptionsMiddleware(ErrorCaptureHandler):
def handle(self, request, exception, tb):
return HttpResponse("\n".join(tb),
content_type="text/plain", status=500) | enderlabs/django-error-capture-middleware | src/error_capture_middleware/handlers/plain.py | Python | bsd-3-clause | 306 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import random
import unittest
from infra_libs.event_mon import router
from infra_libs.event_mon.log_request_lite_pb2 import LogRequestLite
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
class RouterTests(unittest.TestCase):
def test_smoke(self):
# Use dry_run to avoid code that deals with http (including auth).
r = router._Router({}, endpoint=None)
self.assertTrue(r.close())
def test_smoke_with_credentials(self):
cache = {'service_account_creds':
os.path.join(DATA_DIR, 'valid_creds.json'),
'service_accounts_creds_root': 'whatever.the/other/is/absolute'}
r = router._Router(cache, endpoint='https://any.where')
self.assertTrue(r.close())
def test_push_smoke(self):
r = router._Router({}, endpoint=None)
req = LogRequestLite.LogEventLite()
req.event_time_ms = router.time_ms()
req.event_code = 1
req.event_flow_id = 2
r.push_event(req)
self.assertTrue(r.close())
def test_push_error_handling(self):
r = router._Router({}, endpoint=None)
r.push_event(None)
self.assertTrue(r.close())
class BackoffTest(unittest.TestCase):
def test_backoff_time_first_value(self):
    random.seed(0)  # seed before sampling so the jittered value is deterministic
    t = router.backoff_time(attempt=0, retry_backoff=2.)
    self.assertTrue(1.5 <= t <= 2.5)
def test_backoff_time_max_value(self):
t = router.backoff_time(attempt=10, retry_backoff=2., max_delay=5)
self.assertTrue(abs(t - 5.) < 0.0001)
| nicko96/Chrome-Infra | infra_libs/event_mon/test/router_test.py | Python | bsd-3-clause | 1,648 |
# -*- coding: utf-8 -*-
from django.db import models, migrations
import django.utils.timezone
import django.contrib.auth.models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('is_superuser', models.BooleanField(help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status', default=False)),
('username', models.CharField(max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], verbose_name='username', error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True)),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=254, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(help_text='Designates whether the user can log into this admin site.', verbose_name='staff status', default=False)),
('is_active', models.BooleanField(help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active', default=True)),
('date_joined', models.DateTimeField(verbose_name='date joined', default=django.utils.timezone.now)),
('groups', models.ManyToManyField(related_name='user_set', blank=True, verbose_name='groups', to='auth.Group', help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_query_name='user')),
('user_permissions', models.ManyToManyField(related_name='user_set', blank=True, verbose_name='user permissions', to='auth.Permission', help_text='Specific permissions for this user.', related_query_name='user')),
('name', models.CharField(max_length=255, verbose_name='Name of User', blank=True)),
],
options={
'verbose_name': 'user',
'abstract': False,
'verbose_name_plural': 'users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| GeoMatDigital/django-geomat | geomat/users/migrations/0001_initial.py | Python | bsd-3-clause | 3,014 |
from enum import Enum
from django.conf import settings
from django.utils.translation import npgettext_lazy, pgettext_lazy
from django_prices.templatetags import prices_i18n
from prices import Money
class OrderStatus:
DRAFT = 'draft'
UNFULFILLED = 'unfulfilled'
PARTIALLY_FULFILLED = 'partially fulfilled'
FULFILLED = 'fulfilled'
CANCELED = 'canceled'
CHOICES = [
(DRAFT, pgettext_lazy(
'Status for a fully editable, not confirmed order created by '
'staff users',
'Draft')),
(UNFULFILLED, pgettext_lazy(
'Status for an order with no items marked as fulfilled',
'Unfulfilled')),
(PARTIALLY_FULFILLED, pgettext_lazy(
'Status for an order with some items marked as fulfilled',
'Partially fulfilled')),
(FULFILLED, pgettext_lazy(
'Status for an order with all items marked as fulfilled',
'Fulfilled')),
(CANCELED, pgettext_lazy(
'Status for a permanently canceled order',
'Canceled'))]
class FulfillmentStatus:
FULFILLED = 'fulfilled'
CANCELED = 'canceled'
CHOICES = [
(FULFILLED, pgettext_lazy(
'Status for a group of products in an order marked as fulfilled',
'Fulfilled')),
(CANCELED, pgettext_lazy(
'Status for a fulfilled group of products in an order marked '
'as canceled',
'Canceled'))]
class OrderEvents(Enum):
PLACED = 'placed'
PLACED_FROM_DRAFT = 'draft_placed'
OVERSOLD_ITEMS = 'oversold_items'
ORDER_MARKED_AS_PAID = 'marked_as_paid'
CANCELED = 'canceled'
ORDER_FULLY_PAID = 'order_paid'
UPDATED = 'updated'
EMAIL_SENT = 'email_sent'
PAYMENT_CAPTURED = 'captured'
PAYMENT_REFUNDED = 'refunded'
PAYMENT_VOIDED = 'voided'
FULFILLMENT_CANCELED = 'fulfillment_canceled'
FULFILLMENT_RESTOCKED_ITEMS = 'restocked_items'
FULFILLMENT_FULFILLED_ITEMS = 'fulfilled_items'
TRACKING_UPDATED = 'tracking_updated'
NOTE_ADDED = 'note_added'
# Used mostly for importing legacy data from before Enum-based events
OTHER = 'other'
class OrderEventsEmails(Enum):
PAYMENT = 'payment_confirmation'
SHIPPING = 'shipping_confirmation'
ORDER = 'order_confirmation'
FULFILLMENT = 'fulfillment_confirmation'
EMAIL_CHOICES = {
OrderEventsEmails.PAYMENT.value: pgettext_lazy(
'Email type', 'Payment confirmation'),
OrderEventsEmails.SHIPPING.value: pgettext_lazy(
'Email type', 'Shipping confirmation'),
OrderEventsEmails.FULFILLMENT.value: pgettext_lazy(
'Email type', 'Fulfillment confirmation'),
OrderEventsEmails.ORDER.value: pgettext_lazy(
'Email type', 'Order confirmation')}
def get_money_from_params(amount):
"""Money serialization changed at one point, as for now it's serialized
as a dict. But we keep those settings for the legacy data.
Can be safely removed after migrating to Dashboard 2.0
"""
if isinstance(amount, Money):
return amount
if isinstance(amount, dict):
return Money(amount=amount['amount'], currency=amount['currency'])
return Money(amount, settings.DEFAULT_CURRENCY)
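# Illustrative sketch (not part of the original module): both accepted forms,
#   get_money_from_params(Money(10, 'USD'))
#   get_money_from_params({'amount': 10, 'currency': 'USD'})
# normalize to a prices.Money instance, while a bare number falls back to
# settings.DEFAULT_CURRENCY.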
def display_order_event(order_event):
"""This function is used to keep the backwards compatibility
with the old dashboard and new type of order events
(storing enums instead of messages)
"""
event_type = order_event.type
params = order_event.parameters
if event_type == OrderEvents.PLACED_FROM_DRAFT.value:
return pgettext_lazy(
'Dashboard message related to an order',
'Order created from draft order by %(user_name)s' % {
'user_name': order_event.user})
if event_type == OrderEvents.PAYMENT_VOIDED.value:
return pgettext_lazy(
'Dashboard message related to an order',
'Payment was voided by %(user_name)s' % {
'user_name': order_event.user})
if event_type == OrderEvents.PAYMENT_REFUNDED.value:
amount = get_money_from_params(params['amount'])
return pgettext_lazy(
'Dashboard message related to an order',
'Successfully refunded: %(amount)s' % {
'amount': prices_i18n.amount(amount)})
if event_type == OrderEvents.PAYMENT_CAPTURED.value:
amount = get_money_from_params(params['amount'])
return pgettext_lazy(
'Dashboard message related to an order',
'Successfully captured: %(amount)s' % {
'amount': prices_i18n.amount(amount)})
if event_type == OrderEvents.ORDER_MARKED_AS_PAID.value:
return pgettext_lazy(
'Dashboard message related to an order',
'Order manually marked as paid by %(user_name)s' % {
'user_name': order_event.user})
if event_type == OrderEvents.CANCELED.value:
return pgettext_lazy(
'Dashboard message related to an order',
'Order was canceled by %(user_name)s' % {
'user_name': order_event.user})
if event_type == OrderEvents.FULFILLMENT_RESTOCKED_ITEMS.value:
return npgettext_lazy(
'Dashboard message related to an order',
'We restocked %(quantity)d item',
'We restocked %(quantity)d items',
number='quantity') % {'quantity': params['quantity']}
if event_type == OrderEvents.NOTE_ADDED.value:
return pgettext_lazy(
'Dashboard message related to an order',
'%(user_name)s added note: %(note)s' % {
'note': params['message'],
'user_name': order_event.user})
if event_type == OrderEvents.FULFILLMENT_CANCELED.value:
return pgettext_lazy(
'Dashboard message',
'Fulfillment #%(fulfillment)s canceled by %(user_name)s') % {
'fulfillment': params['composed_id'],
'user_name': order_event.user}
if event_type == OrderEvents.FULFILLMENT_FULFILLED_ITEMS.value:
return npgettext_lazy(
'Dashboard message related to an order',
'Fulfilled %(quantity_fulfilled)d item',
'Fulfilled %(quantity_fulfilled)d items',
number='quantity_fulfilled') % {
'quantity_fulfilled': params['quantity']}
if event_type == OrderEvents.PLACED.value:
return pgettext_lazy(
'Dashboard message related to an order',
'Order was placed')
if event_type == OrderEvents.ORDER_FULLY_PAID.value:
return pgettext_lazy(
'Dashboard message related to an order',
'Order was fully paid')
if event_type == OrderEvents.EMAIL_SENT.value:
return pgettext_lazy(
'Dashboard message related to an order',
'%(email_type)s email was sent to the customer '
'(%(email)s)') % {
'email_type': EMAIL_CHOICES[params['email_type']],
'email': params['email']}
if event_type == OrderEvents.UPDATED.value:
return pgettext_lazy(
'Dashboard message related to an order',
'Order details were updated by %(user_name)s' % {
'user_name': order_event.user})
if event_type == OrderEvents.TRACKING_UPDATED.value:
return pgettext_lazy(
'Dashboard message related to an order',
'Fulfillment #%(fulfillment)s tracking was updated to'
' %(tracking_number)s by %(user_name)s') % {
'fulfillment': params['composed_id'],
'tracking_number': params['tracking_number'],
'user_name': order_event.user}
if event_type == OrderEvents.OVERSOLD_ITEMS.value:
return npgettext_lazy(
'Dashboard message related to an order',
'%(quantity)d line item oversold on this order.',
'%(quantity)d line items oversold on this order.',
number='quantity') % {
'quantity': len(params['oversold_items'])}
if event_type == OrderEvents.OTHER.value:
return order_event.parameters['message']
    raise ValueError('Unsupported event type: %s' % event_type)
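# Illustrative sketch (hypothetical event object): an order_event with
# type == OrderEvents.NOTE_ADDED.value, user == 'admin' and
# parameters == {'message': 'Call the customer'} would render roughly as
# "admin added note: Call the customer".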
| UITools/saleor | saleor/order/__init__.py | Python | bsd-3-clause | 8,246 |
from pprint import pprint as pp
from scout.load.transcript import load_transcripts
def test_load_transcripts(adapter, gene_bulk, transcripts_handle):
# GIVEN a empty database
assert sum(1 for i in adapter.all_genes()) == 0
assert sum(1 for i in adapter.transcripts()) == 0
# WHEN inserting a number of genes and some transcripts
adapter.load_hgnc_bulk(gene_bulk)
load_transcripts(adapter, transcripts_lines=transcripts_handle, build="37")
# THEN assert all genes have been added to the database
assert sum(1 for i in adapter.all_genes()) == len(gene_bulk)
    # THEN assert that the transcripts were loaded
assert sum(1 for i in adapter.transcripts()) > 0
| Clinical-Genomics/scout | tests/load/test_load_transcripts.py | Python | bsd-3-clause | 707 |
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from casepro.test import BaseCasesTest
class OrgExtCRUDLTest(BaseCasesTest):
def test_home(self):
url = reverse('orgs_ext.org_home')
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
| praekelt/helpdesk | casepro/orgs_ext/tests.py | Python | bsd-3-clause | 382 |
from smtplib import SMTPException
from django.conf import settings
from django.core.mail import send_mail
from django.views.decorators.http import require_GET
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.utils import timezone
@login_required
def send_account_verification(request):
"""
* if already verified => returns message: info: verification already completed
* if unverified =>
* if no verification code: set it and save UserDetails
* send the verification email and returns message: info: email sent, please check
"""
userdetails = request.user.userdetails
if userdetails.verification_completed:
messages.info(request, "Your email address is already verified.")
else:
userdetails.reset_verification()
code = userdetails.verification_code
subject = 'Mind My Health Email Verification'
domain = request.get_host()
try:
send_mail(
subject,
render_to_string(
'myhpom/accounts/verification_email.txt',
context={
'code': code,
'domain': domain
},
request=request,
),
settings.DEFAULT_FROM_EMAIL,
[request.user.email],
fail_silently=False,
)
messages.info(request, "Please check your email to verify your address.")
except SMTPException:
messages.error(
request, "The verification email could not be sent. Please try again later."
)
userdetails.save()
return redirect('myhpom:dashboard')
@require_GET
@login_required
def verify_account(request, code):
"""This URL is usually accessed from an email. Login will redirect here if needed.
* if already verified => returns message: success: verification already completed
* if not verified: Check the given code against the user's verification code
* if match:
* set verification_completed as now and save UserDetails
* message: success: email verified
* if not match:
* message: invalid: the verification code is invalid.
"""
userdetails = request.user.userdetails
if userdetails.verification_completed:
messages.info(request, "Your email address is already verified.")
else:
if code == userdetails.verification_code:
userdetails.verification_completed = timezone.now()
userdetails.save()
messages.success(request, "Your email address is now verified.")
else:
messages.error(request, "The verification code is invalid.")
return redirect('myhpom:dashboard')
| ResearchSoftwareInstitute/MyHPOM | myhpom/views/verification.py | Python | bsd-3-clause | 2,927 |
# -*- coding: utf-8 -*-
#
# django cms documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 15 10:47:03 2009.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out serve
# to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it absolute,
# like shown here.
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.join(os.path.abspath('.'), '_ext'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinx.ext.autodoc']
extensions = ['djangocms', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.autodoc']
intersphinx_mapping = {
'python': ('http://docs.python.org/3/', None),
'django': ('https://docs.djangoproject.com/en/1.10/', 'https://docs.djangoproject.com/en/1.10/_objects/'),
'classytags': ('http://readthedocs.org/docs/django-classy-tags/en/latest/', None),
'sekizai': ('http://readthedocs.org/docs/django-sekizai/en/latest/', None),
'treebeard': ('http://django-treebeard.readthedocs.io/en/latest/', None),
}
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django cms'
copyright = u'2009-2017, Divio AG and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
path = os.path.split(os.path.dirname(__file__))[0]
path = os.path.split(path)[0]
sys.path.insert(0, path)
import cms
version = cms.__version__
# The full version, including alpha/beta/rc tags.
release = cms.__version__
# The language for content autogenerated by Sphinx. Refer to documentation for
# a list of supported languages.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build', 'env']
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description unit
# titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
todo_include_todos = True
# -- Options for HTML output ---------------------------------------------------
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
try:
import divio_docs_theme
html_theme = 'divio_docs_theme'
html_theme_path = [divio_docs_theme.get_html_theme_path()]
except ImportError:
html_theme = 'default'
show_cloud_banner = True
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'djangocmsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'djangocms.tex', u'django cms Documentation',
u'Divio AG and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top
# of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# -- Options for spelling --------------------------------------------------------
# Spelling check needs an additional module that is not installed by default.
# Add it only if spelling check is requested so docs can be generated without it.
# temporarily disabled because of an issue on RTD. see docs/requirements.txt
# if 'spelling' in sys.argv:
# extensions.append("sphinxcontrib.spelling")
# Spelling language.
spelling_lang = 'en_GB'
# Location of word list.
spelling_word_list_filename = 'spelling_wordlist'
spelling_ignore_pypi_package_names = True
| czpython/django-cms | docs/conf.py | Python | bsd-3-clause | 8,185 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010 DAVY Guillaume
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la classe Sorts, détaillée plus bas."""
from abstraits.obase import BaseObj
from .sort import Sort
class Sorts(BaseObj):
"""Classe-conteneur des sorts.
Cette classe contient tous les sortilèges et autres maléfices de
l'univers, éditables et utilisables, et offre quelques méthodes de
manipulation.
Voir : ./sort.py
"""
enregistrer = True
def __init__(self):
"""Constructeur du conteneur"""
BaseObj.__init__(self)
self.__sorts = {}
def __getnewargs__(self):
return ()
def __contains__(self, cle):
"""Renvoie True si le sort existe, False sinon"""
return cle in self.__sorts
def __len__(self):
return len(self.__sorts)
def __getitem__(self, cle):
"""Renvoie un sort à partir de sa clé"""
return self.__sorts[cle]
def __setitem__(self, cle, sort):
"""Ajoute un sort à la liste"""
self.__sorts[cle] = sort
def __delitem__(self, cle):
"""Détruit le sort spécifié"""
del self.__sorts[cle]
def values(self):
return self.__sorts.values()
def ajouter_ou_modifier(self, cle):
"""Ajoute un sort ou le renvoie si existant"""
if cle in self.__sorts:
return self.__sorts[cle]
else:
sort = Sort(cle, self)
self.__sorts[cle] = sort
return sort
def get(self, cle, valeur=None):
"""Retourne le sort ou valeur si non présent."""
return self.__sorts.get(cle, valeur)
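# Illustrative usage (a sketch; within the game this container is normally
# created and persisted by the engine):
#   sorts = Sorts()
#   fireball = sorts.ajouter_ou_modifier("fireball")
#   assert "fireball" in sorts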
| stormi/tsunami | src/secondaires/magie/sorts.py | Python | bsd-3-clause | 3,125 |
from tests.modules.ffi.base import BaseFFITest
from rpython.rtyper.lltypesystem import rffi
# Much of this is still vague: many of the constants had to be set to
# something in order to run some specs, even though those specs weren't
# about them.
class TestTypeDefs(BaseFFITest):
def test_it_is_kind_of_a_Hash(self, space):
assert self.ask(space, 'FFI::TypeDefs.kind_of? Hash')
class TestTypes(BaseFFITest):
def test_it_is_kind_of_a_Hash(self, space):
assert self.ask(space, 'FFI::Types.kind_of? Hash')
class TestPlatform(BaseFFITest):
def test_it_is_a_Module(self, space):
assert self.ask(space, "FFI::Platform.is_a? Module")
def test_it_offers_some_SIZE_constants(self, space):
w_res = space.execute('FFI::Platform::INT8_SIZE')
assert space.int_w(w_res) == rffi.sizeof(rffi.CHAR)
w_res = space.execute('FFI::Platform::INT16_SIZE')
assert space.int_w(w_res) == rffi.sizeof(rffi.SHORT)
w_res = space.execute('FFI::Platform::INT32_SIZE')
assert space.int_w(w_res) == rffi.sizeof(rffi.INT)
w_res = space.execute('FFI::Platform::INT64_SIZE')
assert space.int_w(w_res) == rffi.sizeof(rffi.LONGLONG)
w_res = space.execute('FFI::Platform::LONG_SIZE')
assert space.int_w(w_res) == rffi.sizeof(rffi.LONG)
w_res = space.execute('FFI::Platform::FLOAT_SIZE')
assert space.int_w(w_res) == rffi.sizeof(rffi.FLOAT)
w_res = space.execute('FFI::Platform::DOUBLE_SIZE')
assert space.int_w(w_res) == rffi.sizeof(rffi.DOUBLE)
w_res = space.execute('FFI::Platform::ADDRESS_SIZE')
assert space.int_w(w_res) == rffi.sizeof(rffi.VOIDP)
class TestStructLayout(BaseFFITest):
def test_it_is_a_class(self, space):
assert self.ask(space, "FFI::StructLayout.is_a? Class")
def test_its_Field_constant_is_nil(self, space):
assert self.ask(space, "FFI::StructLayout::Field.nil?")
class TestStructByReference(BaseFFITest):
def test_it_is_a_class(self, space):
assert self.ask(space, "FFI::StructByReference.is_a? Class")
class TestNullPointerError(BaseFFITest):
def test_it_inherits_from_Exception(self, space):
assert self.ask(space,
"FFI::NullPointerError.ancestors.include? Exception")
| babelsberg/babelsberg-r | tests/modules/ffi/test_ffi.py | Python | bsd-3-clause | 2,305 |
from django_sqlalchemy.test import *
from apps.blog.models import Category
class TestContains(object):
def setup(self):
Category.__table__.insert().execute({'name': 'Python'},
{'name': 'PHP'}, {'name': 'Ruby'}, {'name': 'Smalltalk'},
{'name': 'CSharp'}, {'name': 'Modula'}, {'name': 'Algol'},
{'name': 'Forth'}, {'name': 'Pascal'})
@fails_on('sqlite')
def test_should_contain_string_in_name(self):
assert 4 == Category.objects.filter(name__contains='a').count()
assert 1 == Category.objects.filter(name__contains='A').count()
@fails_on_everything_except('sqlite')
def test_should_contain_string_in_name_on_sqlite(self):
assert 5 == Category.objects.filter(name__contains='a').count()
assert 5 == Category.objects.filter(name__contains='A').count()
def test_should_contain_string_in_name_regardless_of_case(self):
assert 5 == Category.objects.filter(name__icontains='a').count()
assert 5 == Category.objects.filter(name__icontains='A').count()
def test_should_contain_string_at_beginning(self):
category = Category.objects.filter(name__contains='Sma')
assert 1 == category.count()
assert_equal(u'Smalltalk', category[0].name)
def test_should_contain_string_at_end(self):
category = Category.objects.filter(name__contains='arp')
assert 1 == category.count()
assert_equal(u'CSharp', category[0].name)
| brosner/django-sqlalchemy | tests/query/test_contains.py | Python | bsd-3-clause | 1,486 |
'''
script @ mandrewcito
'''
import cv2
import numpy as np
import sys
def callback(x):
    # Trackbar callback: ignore the passed value and re-read every slider.
    x = cv2.getTrackbarPos('Kernel X', 'image')
    y = cv2.getTrackbarPos('Kernel Y', 'image')
    sigma = cv2.getTrackbarPos('sigma/100', 'image')
    # GaussianBlur requires odd, positive kernel dimensions.
    x = max(1, x) | 1
    y = max(1, y) | 1
    img = cv2.GaussianBlur(imgOrig, (x, y), sigma / 100.0)
    cv2.imshow('image', img)
# Get the total number of args passed to the script
total = len(sys.argv)
# Get the arguments list
cmdargs = str(sys.argv)
# WINDOW_AUTOSIZE is the cv2-level constant (CV_WINDOW_AUTOSIZE lives in cv2.cv)
cv2.namedWindow('image', cv2.WINDOW_AUTOSIZE)
# create trackbars for the blur kernel size and sigma
cv2.createTrackbar('Kernel X', 'image', 1, 100, callback)
cv2.createTrackbar('Kernel Y', 'image', 1, 100, callback)
cv2.createTrackbar('sigma/100', 'image', 0, 1000, callback)
imgOrig = cv2.imread(sys.argv[1])
img=imgOrig
cv2.startWindowThread()
cv2.imshow('image',img)
cv2.waitKey(0) & 0xFF
cv2.destroyAllWindows()
| mandrewcito/EOpenCV | scriptExamples/gaussianFilter.py | Python | bsd-3-clause | 826 |
"""
SymPy core decorators.
The purpose of this module is to expose decorators without any other
dependencies, so that they can be easily imported anywhere in sympy/core.
"""
from functools import wraps
from sympify import SympifyError, sympify
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
@wraps(func)
def new_func(*args, **kwargs):
from sympy.utilities.exceptions import SymPyDeprecationWarning
SymPyDeprecationWarning(
"Call to deprecated function.",
feature=func.__name__ + "()"
).warn()
return func(*args, **kwargs)
return new_func
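# Illustrative sketch (hypothetical function, not part of this module):
#
#   @deprecated
#   def old_solve(x):
#       return x**2
#
# Calling old_solve(3) emits a SymPyDeprecationWarning naming "old_solve()"
# before delegating to the original function.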
def _sympifyit(arg, retval=None):
"""decorator to smartly _sympify function arguments
@_sympifyit('other', NotImplemented)
def add(self, other):
...
In add, other can be thought of as already being a SymPy object.
If it is not, the code is likely to catch an exception, then other will
be explicitly _sympified, and the whole code restarted.
if _sympify(arg) fails, NotImplemented will be returned
see: __sympifyit
"""
def deco(func):
return __sympifyit(func, arg, retval)
return deco
def __sympifyit(func, arg, retval=None):
"""decorator to _sympify `arg` argument for function `func`
don't use directly -- use _sympifyit instead
"""
# we support f(a,b) only
assert func.func_code.co_argcount
# only b is _sympified
assert func.func_code.co_varnames[1] == arg
if retval is None:
@wraps(func)
def __sympifyit_wrapper(a, b):
return func(a, sympify(b, strict=True))
else:
@wraps(func)
def __sympifyit_wrapper(a, b):
try:
return func(a, sympify(b, strict=True))
except SympifyError:
return retval
return __sympifyit_wrapper
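# Illustrative sketch (hypothetical class, not part of this module): the second
# argument of the decorated binary method is sympified before the body runs,
# and NotImplemented is returned if sympification fails.
#
#   class Foo(object):
#       @_sympifyit('other', NotImplemented)
#       def __add__(self, other):
#           # ``other`` can be treated as a SymPy object here
#           return other + 1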
def call_highest_priority(method_name):
"""A decorator for binary special methods to handle _op_priority.
Binary special methods in Expr and its subclasses use a special attribute
'_op_priority' to determine whose special method will be called to
handle the operation. In general, the object having the highest value of
'_op_priority' will handle the operation. Expr and subclasses that define
custom binary special methods (__mul__, etc.) should decorate those
methods with this decorator to add the priority logic.
The ``method_name`` argument is the name of the method of the other class
that will be called. Use this decorator in the following manner::
# Call other.__rmul__ if other._op_priority > self._op_priority
@call_highest_priority('__rmul__')
def __mul__(self, other):
...
# Call other.__mul__ if other._op_priority > self._op_priority
@call_highest_priority('__mul__')
def __rmul__(self, other):
...
"""
def priority_decorator(func):
def binary_op_wrapper(self, other):
if hasattr(other, '_op_priority'):
if other._op_priority > self._op_priority:
try:
f = getattr(other, method_name)
except AttributeError:
pass
else:
return f(self)
return func(self, other)
return binary_op_wrapper
return priority_decorator
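# Illustrative sketch (hypothetical subclass, not part of this module): because
# Special._op_priority is higher than the default, Expr.__mul__ defers to
# Special.__rmul__ when the right operand is a Special instance.
#
#   class Special(Expr):
#       _op_priority = 20.0
#
#       @call_highest_priority('__mul__')
#       def __rmul__(self, other):
#           return ...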
| ichuang/sympy | sympy/core/decorators.py | Python | bsd-3-clause | 3,543 |
from .models import CreateModel, DeleteModel, AlterModelTable, AlterUniqueTogether, AlterIndexTogether
from .fields import AddField, RemoveField, AlterField, RenameField
| denisenkom/django | django/db/migrations/operations/__init__.py | Python | bsd-3-clause | 170 |
# -*- coding: utf-8 -*-
"""
ulmo.ncdc.ghcn_daily.core
~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides direct access to `National Climatic Data Center`_
`Global Historical Climate Network - Daily`_ dataset.
.. _National Climatic Data Center: http://www.ncdc.noaa.gov
.. _Global Historical Climate Network - Daily: http://www.ncdc.noaa.gov/oa/climate/ghcn-daily/
"""
import itertools
import os
import numpy as np
import pandas
from tsgettoolbox.ulmo import util
GHCN_DAILY_DIR = os.path.join(util.get_ulmo_dir(), "ncdc/ghcn_daily")
def get_data(station_id, elements=None, update=True, as_dataframe=False):
"""Retrieves data for a given station.
Parameters
----------
station_id : str
Station ID to retrieve data for.
elements : ``None``, str, or list of str
If specified, limits the query to given element code(s).
update : bool
If ``True`` (default), new data files will be downloaded if they are
newer than any previously cached files. If ``False``, then previously
downloaded files will be used and new files will only be downloaded if
there is not a previously downloaded file for a given station.
as_dataframe : bool
If ``False`` (default), a dict with element codes mapped to value dicts
is returned. If ``True``, a dict with element codes mapped to equivalent
pandas.DataFrame objects will be returned. The pandas dataframe is used
internally, so setting this to ``True`` is a little bit faster as it
skips a serialization step.
Returns
-------
site_dict : dict
A dict with element codes as keys, mapped to collections of values. See
the ``as_dataframe`` parameter for more.
"""
if isinstance(elements, str):
elements = [elements]
start_columns = [
("year", 11, 15, int),
("month", 15, 17, int),
("element", 17, 21, str),
]
value_columns = [
("value", 0, 5, float),
("mflag", 5, 6, str),
("qflag", 6, 7, str),
("sflag", 7, 8, str),
]
columns = list(
itertools.chain(
start_columns,
*[
[
(name + str(n), start + 13 + (8 * n), end + 13 + (8 * n), converter)
for name, start, end, converter in value_columns
]
for n in range(1, 32)
]
)
)
station_file_path = _get_ghcn_file(station_id + ".dly", check_modified=update)
station_data = util.parse_fwf(station_file_path, columns, na_values=[-9999])
dataframes = {}
for element_name, element_df in station_data.groupby("element"):
if not elements is None and element_name not in elements:
continue
element_df["month_period"] = element_df.apply(
lambda x: pandas.Period("{}-{}".format(x["year"], x["month"])), axis=1
)
element_df = element_df.set_index("month_period")
monthly_index = element_df.index
# here we're just using pandas' builtin resample logic to construct a daily
# index for the timespan
# 2018/11/27 johanneshorak: hotfix to get ncdc ghcn_daily working again
# new resample syntax requires resample method to generate resampled index.
daily_index = element_df.resample("D").sum().index.copy()
# XXX: hackish; pandas support for this sort of thing will probably be
# added soon
month_starts = (monthly_index - 1).asfreq("D") + 1
dataframe = pandas.DataFrame(
columns=["value", "mflag", "qflag", "sflag"], index=daily_index
)
for day_of_month in range(1, 32):
dates = [
date
for date in (month_starts + day_of_month - 1)
if date.day == day_of_month
]
if not dates:
continue
months = pandas.PeriodIndex([pandas.Period(date, "M") for date in dates])
for column_name in dataframe.columns:
col = column_name + str(day_of_month)
dataframe[column_name][dates] = element_df[col][months]
dataframes[element_name] = dataframe
if as_dataframe:
return dataframes
return {
key: util.dict_from_dataframe(dataframe)
for key, dataframe in dataframes.items()
}
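# Example usage (a sketch; the station ID and element code are illustrative):
#   data = get_data('USW00094728', elements=['TMAX'], as_dataframe=True)
#   tmax = data['TMAX']  # daily DataFrame with value/mflag/qflag/sflag columns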
def get_stations(
country=None,
state=None,
elements=None,
start_year=None,
end_year=None,
update=True,
as_dataframe=False,
):
"""Retrieves station information, optionally limited to specific parameters.
Parameters
----------
country : str
The country code to use to limit station results. If set to ``None``
(default), then stations from all countries are returned.
state : str
The state code to use to limit station results. If set to ``None``
(default), then stations from all states are returned.
elements : ``None``, str, or list of str
If specified, station results will be limited to the given element codes
and only stations that have data for any these elements will be
returned.
start_year : int
If specified, station results will be limited to contain only stations
that have data after this year. Can be combined with the ``end_year``
argument to get stations with data within a range of years.
end_year : int
If specified, station results will be limited to contain only stations
that have data before this year. Can be combined with the ``start_year``
argument to get stations with data within a range of years.
update : bool
If ``True`` (default), new data files will be downloaded if they are
newer than any previously cached files. If ``False``, then previously
downloaded files will be used and new files will only be downloaded if
there is not a previously downloaded file for a given station.
as_dataframe : bool
If ``False`` (default), a dict with station IDs keyed to station dicts
is returned. If ``True``, a single pandas.DataFrame object will be
returned. The pandas dataframe is used internally, so setting this to
``True`` is a little bit faster as it skips a serialization step.
Returns
-------
stations_dict : dict or pandas.DataFrame
A dict or pandas.DataFrame representing station information for stations
matching the arguments. See the ``as_dataframe`` parameter for more.
"""
columns = [
("country", 0, 2, None),
("network", 2, 3, None),
("network_id", 3, 11, None),
("latitude", 12, 20, None),
("longitude", 21, 30, None),
("elevation", 31, 37, None),
("state", 38, 40, None),
("name", 41, 71, None),
("gsn_flag", 72, 75, None),
("hcn_flag", 76, 79, None),
("wm_oid", 80, 85, None),
]
stations_file = _get_ghcn_file("ghcnd-stations.txt", check_modified=update)
stations = util.parse_fwf(stations_file, columns)
if not country is None:
stations = stations[stations["country"] == country]
if not state is None:
stations = stations[stations["state"] == state]
# set station id and index by it
stations["id"] = stations[["country", "network", "network_id"]].T.apply("".join)
if not elements is None or not start_year is None or not end_year is None:
inventory = _get_inventory(update=update)
if not elements is None:
if isinstance(elements, str):
elements = [elements]
mask = np.zeros(len(inventory), dtype=bool)
for element in elements:
mask += inventory["element"] == element
inventory = inventory[mask]
if not start_year is None:
inventory = inventory[inventory["last_year"] >= start_year]
if not end_year is None:
inventory = inventory[inventory["first_year"] <= end_year]
uniques = inventory["id"].unique()
ids = pandas.DataFrame(uniques, index=uniques, columns=["id"])
stations = pandas.merge(stations, ids).set_index("id", drop=False)
stations = stations.set_index("id", drop=False)
    # wm_oid gets converted to a float, so cast it to str manually
    # pandas versions prior to 0.13.0 could use numpy's fixed-width string type
# to do this but that stopped working in pandas 0.13.0 - fortunately a
# regex-based helper method was added then, too
if pandas.__version__ < "0.13.0":
stations["wm_oid"] = stations["wm_oid"].astype("|U5")
stations["wm_oid"][stations["wm_oid"] == "nan"] = np.nan
else:
stations["wm_oid"] = stations["wm_oid"].astype("|U5").map(lambda x: x[:-2])
is_nan = stations["wm_oid"] == "n"
is_empty = stations["wm_oid"] == ""
is_invalid = is_nan | is_empty
stations.loc[is_invalid, "wm_oid"] = np.nan
if as_dataframe:
return stations
return util.dict_from_dataframe(stations)
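# Example usage (a sketch; the country/state/element codes are illustrative):
#   stations = get_stations(country='US', state='TX', elements=['PRCP'],
#                           as_dataframe=True)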
def _get_ghcn_file(filename, check_modified=True):
base_url = "http://www1.ncdc.noaa.gov/pub/data/ghcn/daily/"
if "ghcnd-" in filename:
url = base_url + filename
else:
url = base_url + "all/" + filename
path = os.path.join(GHCN_DAILY_DIR, url.split("/")[-1])
util.download_if_new(url, path, check_modified=check_modified)
return path
def _get_inventory(update=True):
columns = [
("id", 0, 11, None),
("latitude", 12, 20, None),
("longitude", 21, 30, None),
("element", 31, 35, None),
("first_year", 36, 40, None),
("last_year", 41, 45, None),
]
inventory_file = _get_ghcn_file("ghcnd-inventory.txt", check_modified=update)
return util.parse_fwf(inventory_file, columns)
| timcera/tsgettoolbox | src/tsgettoolbox/ulmo/ncdc/ghcn_daily/core.py | Python | bsd-3-clause | 9,898 |
import unittest
from sympy import sympify
from nineml.abstraction import (
Dynamics, AnalogSendPort, Alias,
AnalogReceivePort, AnalogReducePort, Regime, On,
OutputEvent, EventReceivePort, Constant, StateVariable, Parameter,
OnCondition, OnEvent, Trigger)
import nineml.units as un
from nineml.exceptions import NineMLMathParseError, NineMLUsageError
from nineml.document import Document
from nineml.utils.iterables import unique_by_id
class ComponentClass_test(unittest.TestCase):
def test_aliases(self):
# Signature: name
# Forwarding function to self.dynamics.aliases
# No Aliases:
self.assertEqual(
list(Dynamics(name='C1').aliases),
[]
)
# 2 Aliases
C = Dynamics(name='C1', aliases=['G:= 0', 'H:=1'])
self.assertEqual(len(list((C.aliases))), 2)
self.assertEqual(
set(C.alias_names), set(['G', 'H'])
)
C = Dynamics(name='C1', aliases=['G:= 0', 'H:=1', Alias('I', '3')])
self.assertEqual(len(list((C.aliases))), 3)
self.assertEqual(
set(C.alias_names), set(['G', 'H', 'I'])
)
# Using DynamicsBlock Parameter:
C = Dynamics(name='C1', aliases=['G:= 0', 'H:=1'])
self.assertEqual(len(list((C.aliases))), 2)
self.assertEqual(
set(C.alias_names), set(['G', 'H'])
)
C = Dynamics(name='C1',
aliases=['G:= 0', 'H:=1', Alias('I', '3')])
self.assertEqual(len(list((C.aliases))), 3)
self.assertEqual(
set(C.alias_names), set(['G', 'H', 'I'])
)
# Invalid Construction:
# Invalid Valid String:
self.assertRaises(
NineMLUsageError,
Dynamics, name='C1', aliases=['H=0']
)
# Duplicate Alias Names:
Dynamics(name='C1', aliases=['H:=0', 'G:=1'])
self.assertRaises(
NineMLUsageError,
Dynamics, name='C1', aliases=['H:=0', 'H:=1']
)
self.assertRaises(
NineMLUsageError,
Dynamics, name='C1', aliases=['H:=0', Alias('H', '1')]
)
# Self referential aliases:
self.assertRaises(
NineMLUsageError,
Dynamics,
name='C1', aliases=['H := H +1'],
)
self.assertRaises(
NineMLUsageError,
Dynamics,
name='C1', aliases=['H := G + 1', 'G := H + 1'],
)
# Referencing none existent symbols:
self.assertRaises(
NineMLUsageError,
Dynamics,
name='C1',
aliases=['H := G + I'],
parameters=['P1'],
)
# Invalid Names:
self.assertRaises(
NineMLUsageError,
Dynamics,
name='C1', aliases=['H.2 := 0'],
)
self.assertRaises(
NineMLUsageError,
Dynamics,
name='C1', aliases=['2H := 0'],
)
self.assertRaises(
NineMLUsageError,
Dynamics,
name='C1', aliases=['E(H) := 0'],
)
self.assertRaises(
NineMLUsageError,
Dynamics,
name='C1', aliases=['tanh := 0'],
)
self.assertRaises(
NineMLUsageError,
Dynamics,
name='C1', aliases=['t := 0'],
)
def test_aliases_map(self):
# Signature: name
# Forwarding function to self.dynamics.alias_map
self.assertEqual(
Dynamics(name='C1')._aliases, {}
)
c1 = Dynamics(name='C1', aliases=['A:=3'])
self.assertEqual(c1.alias('A').rhs_as_python_func(), 3)
self.assertEqual(len(c1._aliases), 1)
c2 = Dynamics(name='C1', aliases=['A:=3', 'B:=5'])
self.assertEqual(c2.alias('A').rhs_as_python_func(), 3)
self.assertEqual(c2.alias('B').rhs_as_python_func(), 5)
self.assertEqual(len(c2._aliases), 2)
c3 = Dynamics(name='C1', aliases=['C:=13', 'Z:=15'])
self.assertEqual(c3.alias('C').rhs_as_python_func(), 13)
self.assertEqual(c3.alias('Z').rhs_as_python_func(), 15)
self.assertEqual(len(c3._aliases), 2)
def test_analog_ports(self):
# Signature: name
# No Docstring
c = Dynamics(name='C1')
self.assertEqual(len(list(c.analog_ports)), 0)
c = Dynamics(name='C1')
self.assertEqual(len(list(c.analog_ports)), 0)
c = Dynamics(name='C1', aliases=['A:=2'],
analog_ports=[AnalogSendPort('A')])
self.assertEqual(len(list(c.analog_ports)), 1)
self.assertEqual(list(c.analog_ports)[0].mode, 'send')
self.assertEqual(len(list(c.analog_send_ports)), 1)
self.assertEqual(len(list(c.analog_receive_ports)), 0)
self.assertEqual(len(list(c.analog_reduce_ports)), 0)
c = Dynamics(name='C1', analog_ports=[AnalogReceivePort('B')])
self.assertEqual(len(list(c.analog_ports)), 1)
self.assertEqual(list(c.analog_ports)[0].mode, 'recv')
self.assertEqual(len(list(c.analog_send_ports)), 0)
self.assertEqual(len(list(c.analog_receive_ports)), 1)
self.assertEqual(len(list(c.analog_reduce_ports)), 0)
c = Dynamics(name='C1',
analog_ports=[AnalogReducePort('B', operator='+')])
self.assertEqual(len(list(c.analog_ports)), 1)
self.assertEqual(list(c.analog_ports)[0].mode, 'reduce')
self.assertEqual(list(c.analog_ports)[0].operator, '+')
self.assertEqual(len(list(c.analog_send_ports)), 0)
self.assertEqual(len(list(c.analog_receive_ports)), 0)
self.assertEqual(len(list(c.analog_reduce_ports)), 1)
# Duplicate Port Names:
self.assertRaises(
NineMLUsageError,
Dynamics,
name='C1',
aliases=['A1:=1'],
analog_ports=[AnalogReducePort('B', operator='+'),
AnalogSendPort('B')]
)
self.assertRaises(
NineMLUsageError,
Dynamics,
name='C1',
aliases=['A1:=1'],
analog_ports=[AnalogSendPort('A'), AnalogSendPort('A')]
)
self.assertRaises(
NineMLUsageError,
Dynamics,
name='C1',
aliases=['A1:=1'],
analog_ports=[AnalogReceivePort('A'), AnalogReceivePort('A')]
)
self.assertRaises(
NineMLUsageError,
lambda: Dynamics(name='C1', analog_ports=[AnalogReceivePort('1')])
)
self.assertRaises(
NineMLUsageError,
lambda: Dynamics(name='C1', analog_ports=[AnalogReceivePort('?')])
)
def duplicate_port_name_event_analog(self):
# Check different names are OK:
Dynamics(
name='C1', aliases=['A1:=1'],
event_ports=[EventReceivePort('A')],
analog_ports=[AnalogSendPort('A')])
self.assertRaises(
NineMLUsageError,
Dynamics,
name='C1',
aliases=['A1:=1'],
event_ports=[EventReceivePort('A')],
analog_ports=[AnalogSendPort('A')]
)
def test_event_ports(self):
# Signature: name
# No Docstring
# Check inference of output event ports:
c = Dynamics(
name='Comp1',
regimes=Regime(
transitions=[
On('V > a', do=OutputEvent('ev_port1')),
On('V > b', do=OutputEvent('ev_port1')),
On('V < c', do=OutputEvent('ev_port2')),
]
),
)
self.assertEqual(len(list(c.event_ports)), 2)
# Check inference of output event ports:
c = Dynamics(
name='Comp1',
regimes=[
Regime(name='r1',
transitions=[
On('V > a', do=OutputEvent('ev_port1'), to='r2'),
On('V < b', do=OutputEvent('ev_port2'))]),
Regime(name='r2',
transitions=[
On('V > a', do=OutputEvent('ev_port2'), to='r1'),
On('V < b', do=OutputEvent('ev_port3'))])
]
)
self.assertEqual(len(list(c.event_ports)), 3)
# Check inference of output event ports:
c = Dynamics(
name='Comp1',
regimes=[
Regime(name='r1',
transitions=[
On('spikeinput1', do=[]),
On('spikeinput2', do=OutputEvent('ev_port2'),
to='r2')]),
Regime(name='r2',
transitions=[
On('V > a', do=OutputEvent('ev_port2')),
On('spikeinput3', do=OutputEvent('ev_port3'),
to='r1')])
]
)
self.assertEqual(len(list(c.event_ports)), 5)
def test_parameters(self):
# Signature: name
# No Docstring
# No parameters; nothing to infer
c = Dynamics(name='cl')
self.assertEqual(len(list(c.parameters)), 0)
# Mismatch between inferred and actual parameters
self.assertRaises(
NineMLUsageError,
Dynamics, name='cl', parameters=['a'])
# Single parameter inference from an alias block
c = Dynamics(name='cl', aliases=['A1:=a'])
self.assertEqual(len(list(c.parameters)), 1)
self.assertEqual(list(c.parameters)[0].name, 'a')
# More complex inference:
c = Dynamics(name='cl', aliases=['A1:=a+e', 'B1:=a+pi+b'],
constants=[Constant('pi', 3.141592653589793)])
self.assertEqual(len(list(c.parameters)), 3)
self.assertEqual(sorted([p.name for p in c.parameters]),
['a', 'b', 'e'])
# From State Assignments and Differential Equations, and Conditionals
c = Dynamics(name='cl',
aliases=['A1:=a+e', 'B1:=a+pi+b'],
regimes=Regime('dX/dt = (6 + c + sin(d))/t',
'dV/dt = 1.0/t',
transitions=On('V>Vt',
do=['X = X + f', 'V=0'])),
constants=[Constant('pi', 3.1415926535)])
self.assertEqual(len(list(c.parameters)), 7)
self.assertEqual(
sorted([p.name for p in c.parameters]),
['Vt', 'a', 'b', 'c', 'd', 'e', 'f'])
self.assertRaises(
NineMLUsageError,
Dynamics,
name='cl',
aliases=['A1:=a+e', 'B1:=a+pi+b'],
regimes=Regime('dX/dt = 6 + c + sin(d)',
'dV/dt = 1.0',
transitions=On('V>Vt', do=['X = X + f', 'V=0'])
),
parameters=['a', 'b', 'c'])
def test_regimes(self):
c = Dynamics(name='cl', )
self.assertEqual(len(list(c.regimes)), 0)
c = Dynamics(name='cl',
regimes=Regime('dX/dt=1/t',
name='r1',
transitions=On('X>X1', do=['X = X0'],
to=None)))
self.assertEqual(len(list(c.regimes)), 1)
c = Dynamics(name='cl',
regimes=[
Regime('dX/dt=1/t',
name='r1',
transitions=On('X>X1', do=['X=X0'],
to='r2')),
Regime('dX/dt=1/t',
name='r2',
transitions=On('X>X1', do=['X=X0'],
to='r3')),
Regime('dX/dt=1/t',
name='r3',
transitions=On('X>X1', do=['X=X0'],
to='r4')),
Regime('dX/dt=1/t',
name='r4',
transitions=On('X>X1', do=['X=X0'],
to='r1'))])
self.assertEqual(len(list(c.regimes)), 4)
self.assertEqual(
set(c.regime_names),
set(['r1', 'r2', 'r3', 'r4'])
)
c = Dynamics(name='cl',
regimes=[
Regime('dX/dt=1/t', name='r1',
transitions=On('X>X1', do=['X=X0'],
to='r2')),
Regime('dX/dt=1/t',
name='r2',
transitions=On('X>X1', do=['X=X0'],
to='r3')),
Regime('dX/dt=1/t',
name='r3',
transitions=On('X>X1', do=['X=X0'],
to='r4')),
Regime('dX/dt=1/t',
name='r4',
transitions=On('X>X1', do=['X=X0'],
to='r1'))])
self.assertEqual(len(list(c.regimes)), 4)
self.assertEqual(
set([r.name for r in c.regimes]),
set(['r1', 'r2', 'r3', 'r4'])
)
# Duplicate Names:
self.assertRaises(
NineMLUsageError,
Dynamics, name='cl',
regimes=[
Regime('dX/dt=1/t',
name='r',
transitions=On('X>X1', do=['X=X0'])),
Regime('dX/dt=1/t',
name='r',
transitions=On('X>X1', do=['X=X0'],)), ]
)
def test_regime_aliases(self):
a = Dynamics(
name='a',
aliases=[Alias('A', '4/t')],
regimes=[
Regime('dX/dt=1/t + A',
name='r1',
transitions=On('X>X1', do=['X=X0'], to='r2')),
Regime('dX/dt=1/t + A',
name='r2',
transitions=On('X>X1', do=['X=X0'],
to='r1'),
aliases=[Alias('A', '8 / t')])])
self.assertEqual(a.regime('r2').alias('A'), Alias('A', '8 / t'))
self.assertRaises(
NineMLUsageError,
Dynamics,
name='a',
regimes=[
Regime('dX/dt=1/t + A',
name='r1',
transitions=On('X>X1', do=['X=X0'], to='r2')),
Regime('dX/dt=1/t + A',
name='r2',
transitions=On('X>X1', do=['X=X0'],
to='r1'),
aliases=[Alias('A', '8 / t')])])
document = Document()
a_xml = a.serialize(format='xml', version=1, document=document)
b = Dynamics.unserialize(a_xml, format='xml', version=1,
document=Document(un.dimensionless.clone()))
self.assertEqual(a, b,
"Dynamics with regime-specific alias failed xml "
"roundtrip:\n{}".format(a.find_mismatch(b)))
def test_state_variables(self):
# No parameters; nothing to infer
c = Dynamics(name='cl')
self.assertEqual(len(list(c.state_variables)), 0)
# From State Assignments and Differential Equations, and Conditionals
c = Dynamics(
name='cl',
aliases=['A1:=a+e', 'B1:=a+pi+b'],
regimes=Regime('dX/dt = (6 + c + sin(d))/t',
'dV/dt = 1.0/t',
transitions=On('V>Vt', do=['X = X + f', 'V=0'])))
self.assertEqual(
set(c.state_variable_names),
set(['X', 'V']))
self.assertRaises(
NineMLUsageError,
Dynamics,
name='cl',
aliases=['A1:=a+e', 'B1:=a+pi+b'],
regimes=Regime('dX/dt = 6 + c + sin(d)',
'dV/dt = 1.0',
transitions=On('V>Vt', do=['X = X + f', 'V=0'])
),
state_variables=['X'])
# Shouldn't pick up 'e' as a parameter:
self.assertRaises(
NineMLUsageError,
Dynamics,
name='cl',
aliases=['A1:=a+e', 'B1:=a+pi+b'],
regimes=Regime('dX/dt = 6 + c + sin(d)',
'dV/dt = 1.0',
transitions=On('V>Vt', do=['X = X + f', 'V=0'])
),
state_variables=['X', 'V', 'Vt'])
c = Dynamics(name='cl',
regimes=[
Regime('dX1/dt=1/t',
name='r1',
transitions=On('X>X1', do=['X=X0'],
to='r2')),
Regime('dX1/dt=1/t',
name='r2',
transitions=On('X>X1', do=['X=X0'],
to='r3')),
Regime('dX2/dt=1/t',
name='r3',
transitions=On('X>X1', do=['X=X0'],
to='r4')),
Regime('dX2/dt=1/t',
name='r4',
transitions=On('X>X1', do=['X=X0'],
to='r1'))])
self.assertEqual(set(c.state_variable_names),
set(['X1', 'X2', 'X']))
def test_transitions(self):
c = Dynamics(name='cl',
regimes=[
Regime('dX1/dt=1/t',
name='r1',
transitions=[On('X>X1', do=['X=X0'],
to='r2'),
On('X>X2', do=['X=X0'],
to='r3'), ]
),
Regime('dX1/dt=1/t',
name='r2',
transitions=On('X>X1', do=['X=X0'],
to='r3'),),
Regime('dX2/dt=1/t',
name='r3',
transitions=[On('X>X1', do=['X=X0'],
to='r4'),
On('X>X2', do=['X=X0'],
to=None)]),
Regime('dX2/dt=1/t',
name='r4',
transitions=On('X>X1', do=['X=X0'],
to=None))])
self.assertEqual(len(list(c.all_transitions())), 6)
r1 = c.regime('r1')
r2 = c.regime('r2')
r3 = c.regime('r3')
r4 = c.regime('r4')
self.assertEqual(len(list(r1.transitions)), 2)
self.assertEqual(len(list(r2.transitions)), 1)
self.assertEqual(len(list(r3.transitions)), 2)
self.assertEqual(len(list(r4.transitions)), 1)
def target_regimes(regime):
return unique_by_id(t.target_regime for t in regime.transitions)
self.assertEqual(target_regimes(r1), [r2, r3])
self.assertEqual(target_regimes(r2), [r3])
self.assertEqual(target_regimes(r3), [r3, r4])
self.assertEqual(target_regimes(r4), [r4])
def test_all_expressions(self):
a = Dynamics(
name='A',
aliases=['A1:=P1 * SV2', 'A2 := ARP1 + SV2', 'A3 := SV1'],
state_variables=[
StateVariable('SV1', dimension=un.voltage),
StateVariable('SV2', dimension=un.current)],
regimes=[
Regime(
'dSV1/dt = -SV1 / P2',
'dSV2/dt = A3 / ARP2 + SV2 / P2',
transitions=[On('SV1 > P3', do=[OutputEvent('emit')]),
On('spikein', do=[OutputEvent('emit')])],
name='R1'
),
Regime(name='R2', transitions=On('(SV1 > C1) & (SV2 < P4)',
to='R1'))
],
analog_ports=[AnalogReceivePort('ARP1', dimension=un.current),
AnalogReceivePort('ARP2',
dimension=(un.resistance *
un.time)),
AnalogSendPort('A1',
dimension=un.voltage * un.current),
AnalogSendPort('A2', dimension=un.current)],
parameters=[Parameter('P1', dimension=un.voltage),
Parameter('P2', dimension=un.time),
Parameter('P3', dimension=un.voltage),
Parameter('P4', dimension=un.current)],
constants=[Constant('C1', value=1.0, units=un.mV)]
)
self.assertEqual(
set(a.all_expressions), set((
sympify('P1 * SV2'), sympify('ARP1 + SV2'), sympify('SV1'),
sympify('-SV1 / P2'), sympify('-SV1 / P2'),
sympify('A3 / ARP2 + SV2 / P2'), sympify('SV1 > P3'),
sympify('(SV1 > C1) & (SV2 < P4)'))),
"All expressions were not extracted from component class")
class TestOn(unittest.TestCase):
def test_On(self):
# Signature: name(trigger, do=None, to=None)
# No Docstring
# Test that we are correctly inferring OnEvents and OnConditions.
self.assertEqual(type(On('V>0')), OnCondition)
self.assertEqual(type(On('V<0')), OnCondition)
self.assertEqual(type(On('(V<0) & (K>0)')), OnCondition)
self.assertEqual(type(On('V==0')), OnCondition)
self.assertEqual(
type(On("q > 1 / (( 1 + mg_conc * eta * exp ( -1 * gamma*V)))")),
OnCondition)
self.assertEqual(type(On('SP0')), OnEvent)
self.assertEqual(type(On('SP1')), OnEvent)
# Check we can use 'do' with single and multiple values
tr = On('V>0')
self.assertEqual(len(list(tr.output_events)), 0)
self.assertEqual(len(list(tr.state_assignments)), 0)
tr = On('SP0')
self.assertEqual(len(list(tr.output_events)), 0)
self.assertEqual(len(list(tr.state_assignments)), 0)
tr = On('V>0', do=OutputEvent('spike'))
self.assertEqual(len(list(tr.output_events)), 1)
self.assertEqual(len(list(tr.state_assignments)), 0)
tr = On('SP0', do=OutputEvent('spike'))
self.assertEqual(len(list(tr.output_events)), 1)
self.assertEqual(len(list(tr.state_assignments)), 0)
tr = On('V>0', do=[OutputEvent('spike')])
self.assertEqual(len(list(tr.output_events)), 1)
self.assertEqual(len(list(tr.state_assignments)), 0)
tr = On('SP0', do=[OutputEvent('spike')])
self.assertEqual(len(list(tr.output_events)), 1)
self.assertEqual(len(list(tr.state_assignments)), 0)
tr = On('V>0', do=['y=2', OutputEvent('spike'), 'x=1'])
self.assertEqual(len(list(tr.output_events)), 1)
self.assertEqual(len(list(tr.state_assignments)), 2)
tr = On('SP0', do=['y=2', OutputEvent('spike'), 'x=1'])
self.assertEqual(len(list(tr.output_events)), 1)
self.assertEqual(len(list(tr.state_assignments)), 2)
class OnCondition_test(unittest.TestCase):
def test_trigger(self):
invalid_triggers = ['true(',
'V < (V+10',
'V (< V+10)',
'V (< V+10)',
'1 / ( 1 + mg_conc * eta * exp(-1 * gamma*V))'
'1..0'
'..0']
for tr in invalid_triggers:
self.assertRaises(NineMLMathParseError, OnCondition, tr)
        # Test some conditions:
namespace = {
"A": 10,
"B": 5,
"tau_r": 5,
"V": 20,
"Vth": -50.0,
"t_spike": 1.0,
"q": 11.0,
"t": 0.9,
"tref": 0.1
}
cond_exprs = [
["A > -B/tau_r", ("A", "B", "tau_r"), ()],
["(V > 1.0) & !(V<10.0)", ("V",), ()],
["!!(V>10)", ("V"), ()],
["!!(V>10)", ("V"), ()],
["V>exp(Vth)", ("V", "Vth"), ('exp',)],
["!(V>Vth)", ("V", "Vth"), ()],
["!(V>Vth)", ("V", "Vth"), ()],
["exp(V)>Vth", ("V", "Vth"), ("exp",)],
["true", (), ()],
["(V < (Vth+q)) & (t > t_spike)", ("t_spike", "t", "q", "Vth",
"V"), ()],
["(V < (Vth+q)) | (t > t_spike)", ("t_spike", "Vth", "q", "V",
"t"), ()],
["(true)", (), ()],
["!true", (), ()],
["!false", (), ()],
["t >= t_spike + tref", ("t", "t_spike", "tref"), ()],
["true & !false", (), ()]
]
return_values = [
True,
True,
True,
True,
True,
False,
False,
True,
True,
False,
False,
True,
False,
True,
False,
True
]
for i, (expr, expt_vars, expt_funcs) in enumerate(cond_exprs):
c = OnCondition(trigger=expr)
self.assertEqual(set(c.trigger.rhs_symbol_names), set(expt_vars))
self.assertEqual(set(str(f) for f in c.trigger.rhs_funcs),
set(expt_funcs))
python_func = c.trigger.rhs_as_python_func
param_dict = dict([(v, namespace[v]) for v in expt_vars])
self.assertEqual(return_values[i], python_func(**param_dict))
def test_trigger_crossing_time_expr(self):
self.assertEqual(Trigger('t > t_next').crossing_time_expr.rhs,
sympify('t_next'))
self.assertEqual(Trigger('t^2 > t_next').crossing_time_expr, None)
self.assertEqual(Trigger('a < b').crossing_time_expr, None)
self.assertEqual(
Trigger('t > t_next || t > t_next2').crossing_time_expr.rhs,
sympify('Min(t_next, t_next2)'))
self.assertEqual(
Trigger('t > t_next || a < b').crossing_time_expr, None)
def test_make_strict(self):
self.assertEqual(
Trigger._make_strict(
sympify('(a >= 0.5) & ~(b < (10 * c * e)) | (c <= d)')),
sympify('(a > 0.5) & (b > (10 * c * e)) | (c < d)'))
class OnEvent_test(unittest.TestCase):
def test_Constructor(self):
pass
def test_src_port_name(self):
self.assertRaises(NineMLUsageError, OnEvent, '1MyEvent1 ')
self.assertRaises(NineMLUsageError, OnEvent, 'MyEvent1 2')
self.assertRaises(NineMLUsageError, OnEvent, 'MyEvent1* ')
self.assertEqual(OnEvent(' MyEvent1 ').src_port_name, 'MyEvent1')
self.assertEqual(OnEvent(' MyEvent2').src_port_name, 'MyEvent2')
class Regime_test(unittest.TestCase):
def test_Constructor(self):
pass
def test_add_on_condition(self):
# Signature: name(self, on_condition)
# Add an OnCondition transition which leaves this regime
#
# If the on_condition object has not had its target regime name set in
# the constructor, or by calling its ``set_target_regime_name()``, then
# the target is assumed to be this regime, and will be set
# appropriately.
#
# The source regime for this transition will be set as this regime.
r = Regime(name='R1')
self.assertEqual(unique_by_id(r.on_conditions), [])
r.add(OnCondition('sp1>0'))
self.assertEqual(len(unique_by_id(r.on_conditions)), 1)
self.assertEqual(len(unique_by_id(r.on_events)), 0)
self.assertEqual(len(unique_by_id(r.transitions)), 1)
def test_add_on_event(self):
# Signature: name(self, on_event)
# Add an OnEvent transition which leaves this regime
#
# If the on_event object has not had its target regime name set in the
# constructor, or by calling its ``set_target_regime_name()``, then the
# target is assumed to be this regime, and will be set appropriately.
#
# The source regime for this transition will be set as this regime.
# from nineml.abstraction.component.dynamics import Regime
r = Regime(name='R1')
self.assertEqual(unique_by_id(r.on_events), [])
r.add(OnEvent('sp'))
self.assertEqual(len(unique_by_id(r.on_events)), 1)
self.assertEqual(len(unique_by_id(r.on_conditions)), 0)
self.assertEqual(len(unique_by_id(r.transitions)), 1)
def test_get_next_name(self):
# Signature: name(cls)
# Return the next distinct autogenerated name
n1 = Regime.get_next_name()
n2 = Regime.get_next_name()
n3 = Regime.get_next_name()
self.assertNotEqual(n1, n2)
self.assertNotEqual(n2, n3)
def test_name(self):
self.assertRaises(NineMLUsageError, Regime, name='&Hello')
self.assertRaises(NineMLUsageError, Regime, name='2Hello')
self.assertEqual(Regime(name='Hello').name, 'Hello')
self.assertEqual(Regime(name='Hello2').name, 'Hello2')
def test_time_derivatives(self):
# Signature: name
# Returns the state-variable time-derivatives in this regime.
#
# .. note::
#
        #     This is not guaranteed to contain the time derivatives for all
# the state-variables specified in the component. If they are not
# defined, they are assumed to be zero in this regime.
r = Regime('dX1/dt=0',
'dX2/dt=0',
name='r1')
self.assertEqual(
set([td.variable for td in r.time_derivatives]),
set(['X1', 'X2']))
# Defining a time derivative twice:
self.assertRaises(
NineMLUsageError,
Regime, 'dX/dt=1', 'dX/dt=2')
# Assigning to a value:
self.assertRaises(
NineMLUsageError,
Regime, 'X=1')
class StateVariable_test(unittest.TestCase):
def test_name(self):
# Signature: name
# No Docstring
self.assertRaises(NineMLUsageError, StateVariable, name='&Hello')
self.assertRaises(NineMLUsageError, StateVariable, name='2Hello')
self.assertEqual(StateVariable(name='Hello').name, 'Hello')
self.assertEqual(StateVariable(name='Hello2').name, 'Hello2')
class Query_test(unittest.TestCase):
def test_event_send_receive_ports(self):
# Signature: name(self)
# Get the ``recv`` EventPorts
# from nineml.abstraction.component.componentqueryer import
# ComponentClassQueryer
# Check inference of output event ports:
c = Dynamics(
name='Comp1',
regimes=Regime(
transitions=[
On('in_ev1', do=OutputEvent('ev_port1')),
On('V < b', do=OutputEvent('ev_port1')),
On('V < c', do=OutputEvent('ev_port2')),
]
),
)
self.assertEqual(len(list(c.event_receive_ports)), 1)
self.assertEqual((list(list(c.event_receive_ports))[0]).name,
'in_ev1')
self.assertEqual(len(list(c.event_send_ports)), 2)
self.assertEqual(set(c.event_send_port_names),
set(['ev_port1', 'ev_port2']))
# Check inference of output event ports:
c = Dynamics(
name='Comp1',
regimes=[
Regime(name='r1',
transitions=[
On('V > a', do=OutputEvent('ev_port1'), to='r2'),
On('in_ev1', do=OutputEvent('ev_port2'))]),
Regime(name='r2',
transitions=[
On('V > a', do=OutputEvent('ev_port2'), to='r1'),
On('in_ev2', do=OutputEvent('ev_port3'))])
]
)
self.assertEqual(len(list(c.event_receive_ports)), 2)
self.assertEqual(set(c.event_receive_port_names),
set(['in_ev1', 'in_ev2']))
self.assertEqual(len(list(c.event_send_ports)), 3)
self.assertEqual(set(c.event_send_port_names),
set(['ev_port1', 'ev_port2', 'ev_port3']))
# Check inference of output event ports:
c = Dynamics(
name='Comp1',
regimes=[
Regime(name='r1',
transitions=[
On('spikeinput1', do=[]),
On('spikeinput2', do=[
OutputEvent('ev_port1'),
OutputEvent('ev_port2')], to='r2')]),
Regime(name='r2',
transitions=[
On('V > a', do=OutputEvent('ev_port2')),
On('spikeinput3', do=OutputEvent('ev_port3'),
to='r1')])
]
)
self.assertEqual(len(list(c.event_receive_ports)), 3)
self.assertEqual(set(c.event_receive_port_names),
set(['spikeinput1', 'spikeinput2', 'spikeinput3']))
self.assertEqual(len(list(c.event_send_ports)), 3)
self.assertEqual(set(c.event_send_port_names),
set(['ev_port1', 'ev_port2', 'ev_port3']))
def test_ports(self):
# Signature: name
# Return an iterator over all the port (Event & Analog) in the
# component
# from nineml.abstraction.component.componentqueryer import
# ComponentClassQueryer
c = Dynamics(
name='Comp1',
regimes=[
Regime(name='r1',
transitions=[
On('spikeinput1', do=[]),
On('spikeinput2', do=OutputEvent('ev_port2'),
to='r2')]),
Regime(name='r2',
transitions=[
On('V > a', do=OutputEvent('ev_port2')),
On('spikeinput3', do=OutputEvent('ev_port3'),
to='r1')])
],
aliases=['A1:=0', 'C:=0'],
analog_ports=[AnalogSendPort('A1'), AnalogReceivePort('B'),
AnalogSendPort('C')]
)
ports = list(list(c.ports))
port_names = [p.name for p in ports]
self.assertEqual(len(port_names), 8)
self.assertEqual(set(port_names),
set(['A1', 'B', 'C', 'spikeinput1', 'spikeinput2',
'spikeinput3', 'ev_port2', 'ev_port3'])
)
def test_regime(self):
# Signature: name(self, name=None)
# Find a regime in the component by name
# from nineml.abstraction.component.componentqueryer import
# ComponentClassQueryer
c = Dynamics(name='cl',
regimes=[
Regime('dX/dt=1/t',
name='r1',
transitions=On('X>X1', do=['X=X0'],
to='r2')),
Regime('dX/dt=1/t',
name='r2',
transitions=On('X>X1', do=['X=X0'],
to='r3')),
Regime('dX/dt=1/t',
name='r3',
transitions=On('X>X1', do=['X=X0'],
to='r4')),
Regime('dX/dt=1/t',
name='r4',
transitions=On('X>X1', do=['X=X0'],
to='r1'))])
self.assertEqual(c.regime(name='r1').name, 'r1')
self.assertEqual(c.regime(name='r2').name, 'r2')
self.assertEqual(c.regime(name='r3').name, 'r3')
self.assertEqual(c.regime(name='r4').name, 'r4')
| INCF/lib9ML | test/unittests/abstraction_test/dynamics_test.py | Python | bsd-3-clause | 37,676 |
from django.db import models
class Place(models.Model):
name = models.CharField(max_length=128)
url = models.URLField(max_length=256, verify_exists=False)
date = models.DateTimeField(auto_now_add=True)
class Meta(object):
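        # 'date' is presumably skipped by topsoil when handling this model
        # (assumed semantics of the topsoil_exclude option in this test app).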
topsoil_exclude = ['date']
def get_absolute_url(self):
return "/places/%i" % self.id
def get_edit_url(self):
return "/places/%i/edit" % self.id
| wooster/django-topsoil | tests/test_provider/testapp/models.py | Python | bsd-3-clause | 430 |
#!/usr/bin/env python
import glob
import os
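# Run the engine binary once for every .ini scene file in the current directory.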
for f in sorted(glob.glob('*.ini')):
print 'Running ' + f + '...'
os.system('./src/engine ' + f)
| timvdm/ComputerGraphics | run_engine.py | Python | bsd-3-clause | 151 |
from django.conf.urls import patterns, url
urlpatterns = patterns('ietf.secr.drafts.views',
url(r'^$', 'search', name='drafts'),
url(r'^add/$', 'add', name='drafts_add'),
url(r'^approvals/$', 'approvals', name='drafts_approvals'),
url(r'^dates/$', 'dates', name='drafts_dates'),
url(r'^nudge-report/$', 'nudge_report', name='drafts_nudge_report'),
url(r'^(?P<id>[A-Za-z0-9._\-\+]+)/$', 'view', name='drafts_view'),
url(r'^(?P<id>[A-Za-z0-9._\-\+]+)/abstract/$', 'abstract', name='drafts_abstract'),
url(r'^(?P<id>[A-Za-z0-9._\-\+]+)/announce/$', 'announce', name='drafts_announce'),
url(r'^(?P<id>[A-Za-z0-9._\-\+]+)/authors/$', 'authors', name='drafts_authors'),
url(r'^(?P<id>[A-Za-z0-9._\-\+]+)/author_delete/(?P<oid>\d{1,6})$',
'author_delete', name='drafts_author_delete'),
url(r'^(?P<id>[A-Za-z0-9._\-\+]+)/confirm/$', 'confirm', name='drafts_confirm'),
url(r'^(?P<id>[A-Za-z0-9._\-\+]+)/edit/$', 'edit', name='drafts_edit'),
url(r'^(?P<id>[A-Za-z0-9._\-\+]+)/extend/$', 'extend', name='drafts_extend'),
url(r'^(?P<id>[A-Za-z0-9._\-\+]+)/email/$', 'email', name='drafts_email'),
url(r'^(?P<id>[A-Za-z0-9._\-\+]+)/makerfc/$', 'makerfc', name='drafts_makerfc'),
url(r'^(?P<id>[A-Za-z0-9._\-\+]+)/replace/$', 'replace', name='drafts_replace'),
url(r'^(?P<id>[A-Za-z0-9._\-\+]+)/resurrect/$', 'resurrect', name='drafts_resurrect'),
url(r'^(?P<id>[A-Za-z0-9._\-\+]+)/revision/$', 'revision', name='drafts_revision'),
url(r'^(?P<id>[A-Za-z0-9._\-\+]+)/update/$', 'update', name='drafts_update'),
url(r'^(?P<id>[A-Za-z0-9._\-\+]+)/withdraw/$', 'withdraw', name='drafts_withdraw'),
)
| wpjesus/codematch | ietf/secr/drafts/urls.py | Python | bsd-3-clause | 1,671 |
# -*- coding: utf-8 -*-
import os
from time import sleep
import nipype.interfaces.base as nib
import pytest
import nipype.pipeline.engine as pe
from nipype.pipeline.plugins.somaflow import soma_not_loaded
class InputSpec(nib.TraitedSpec):
input1 = nib.traits.Int(desc='a random int')
input2 = nib.traits.Int(desc='a random int')
class OutputSpec(nib.TraitedSpec):
output1 = nib.traits.List(nib.traits.Int, desc='outputs')
class SomaTestInterface(nib.BaseInterface):
input_spec = InputSpec
output_spec = OutputSpec
def _run_interface(self, runtime):
runtime.returncode = 0
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output1'] = [1, self.inputs.input1]
return outputs
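# Build a two-node workflow (a Node feeding a MapNode over its list output) and
# execute it with the SomaFlow plugin.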
@pytest.mark.skipif(soma_not_loaded, reason="soma not loaded")
def test_run_somaflow(tmpdir):
os.chdir(str(tmpdir))
pipe = pe.Workflow(name='pipe')
mod1 = pe.Node(interface=SomaTestInterface(), name='mod1')
mod2 = pe.MapNode(interface=SomaTestInterface(),
iterfield=['input1'],
name='mod2')
pipe.connect([(mod1, mod2, [('output1', 'input1')])])
pipe.base_dir = os.getcwd()
mod1.inputs.input1 = 1
execgraph = pipe.run(plugin="SomaFlow")
names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()]
node = list(execgraph.nodes())[names.index('pipe.mod1')]
result = node.get_output('output1')
assert result == [1, 1]
| mick-d/nipype | nipype/pipeline/plugins/tests/test_somaflow.py | Python | bsd-3-clause | 1,509 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from datetime import datetime
import doctest
import os
import os.path
import shutil
from StringIO import StringIO
import time
import tempfile
import threading
import unittest
import urlparse
from couchdb import client, http
from couchdb.tests import testutil
from schematics.validation import validate_instance
class ServerTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
def test_init_with_resource(self):
sess = http.Session()
res = http.Resource(client.DEFAULT_BASE_URL, sess)
serv = client.Server(url=res)
serv.config()
def test_init_with_session(self):
sess = http.Session()
serv = client.Server(client.DEFAULT_BASE_URL, session=sess)
serv.config()
self.assertTrue(serv.resource.session is sess)
def test_exists(self):
self.assertTrue(client.Server(client.DEFAULT_BASE_URL))
self.assertFalse(client.Server('http://localhost:9999'))
def test_repr(self):
repr(self.server)
def test_server_vars(self):
version = self.server.version()
self.assertTrue(isinstance(version, basestring))
config = self.server.config()
self.assertTrue(isinstance(config, dict))
tasks = self.server.tasks()
self.assertTrue(isinstance(tasks, list))
def test_server_stats(self):
stats = self.server.stats()
self.assertTrue(isinstance(stats, dict))
stats = self.server.stats('httpd/requests')
self.assertTrue(isinstance(stats, dict))
self.assertTrue(len(stats) == 1 and len(stats['httpd']) == 1)
def test_get_db_missing(self):
self.assertRaises(http.ResourceNotFound,
lambda: self.server['couchdb-python/missing'])
def test_create_db_conflict(self):
name, db = self.temp_db()
self.assertRaises(http.PreconditionFailed, self.server.create,
name)
def test_delete_db(self):
name, db = self.temp_db()
assert name in self.server
self.del_db(name)
assert name not in self.server
def test_delete_db_missing(self):
self.assertRaises(http.ResourceNotFound, self.server.delete,
'couchdb-python/missing')
def test_replicate(self):
aname, a = self.temp_db()
bname, b = self.temp_db()
id, rev = a.save({'test': 'a'})
result = self.server.replicate(aname, bname)
self.assertEquals(result['ok'], True)
self.assertEquals(b[id]['test'], 'a')
doc = b[id]
doc['test'] = 'b'
b.update([doc],validate=False)
self.server.replicate(bname, aname)
self.assertEquals(a[id]['test'], 'b')
self.assertEquals(b[id]['test'], 'b')
def test_replicate_continuous(self):
aname, a = self.temp_db()
bname, b = self.temp_db()
result = self.server.replicate(aname, bname, continuous=True)
self.assertEquals(result['ok'], True)
version = tuple(int(i) for i in self.server.version().split('.')[:2])
if version >= (0, 10):
self.assertTrue('_local_id' in result)
def test_iter(self):
aname, a = self.temp_db()
bname, b = self.temp_db()
dbs = list(self.server)
self.assertTrue(aname in dbs)
self.assertTrue(bname in dbs)
def test_len(self):
self.temp_db()
self.temp_db()
self.assertTrue(len(self.server) >= 2)
def test_uuids(self):
ls = self.server.uuids()
assert type(ls) == list
ls = self.server.uuids(count=10)
assert type(ls) == list and len(ls) == 10
class DatabaseTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
def test_save_new(self):
doc = {'foo': 'bar'}
id, rev = self.db.save(doc)
self.assertTrue(id is not None)
self.assertTrue(rev is not None)
self.assertEqual((id, rev), (doc['_id'], doc['_rev']))
doc = self.db.get(id)
self.assertEqual(doc['foo'], 'bar')
def test_save_new_with_id(self):
doc = {'_id': 'foo'}
id, rev = self.db.save(doc)
self.assertTrue(doc['_id'] == id == 'foo')
self.assertEqual(doc['_rev'], rev)
def test_save_existing(self):
doc = {}
id_rev_old = self.db.save(doc)
doc['foo'] = True
id_rev_new = self.db.save(doc)
self.assertTrue(doc['_rev'] == id_rev_new[1])
self.assertTrue(id_rev_old[1] != id_rev_new[1])
def test_save_new_batch(self):
doc = {'_id': 'foo'}
id, rev = self.db.save(doc, batch='ok')
self.assertTrue(rev is None)
self.assertTrue('_rev' not in doc)
def test_save_existing_batch(self):
doc = {'_id': 'foo'}
self.db.save(doc)
id_rev_old = self.db.save(doc)
id_rev_new = self.db.save(doc, batch='ok')
self.assertTrue(id_rev_new[1] is None)
self.assertEqual(id_rev_old[1], doc['_rev'])
def test_exists(self):
self.assertTrue(self.db)
self.assertFalse(client.Database('couchdb-python/missing'))
def test_name(self):
# Access name assigned during creation.
name, db = self.temp_db()
self.assertTrue(db.name == name)
# Access lazily loaded name,
self.assertTrue(client.Database(db.resource.url).name == name)
def test_commit(self):
self.assertTrue(self.db.commit()['ok'] == True)
def test_create_large_doc(self):
        self.db['foo'] = {'data': '0123456789' * 110 * 1024} # ~1 MB
self.assertEqual('foo', self.db['foo']['_id'])
def test_doc_id_quoting(self):
self.db['foo/bar'] = {'foo': 'bar'}
self.assertEqual('bar', self.db['foo/bar']['foo'])
del self.db['foo/bar']
self.assertEqual(None, self.db.get('foo/bar'))
def test_unicode(self):
self.db[u'føø'] = {u'bår': u'Iñtërnâtiônàlizætiøn', 'baz': 'ASCII'}
self.assertEqual(u'Iñtërnâtiônàlizætiøn', self.db[u'føø'][u'bår'])
self.assertEqual(u'ASCII', self.db[u'føø'][u'baz'])
def test_disallow_nan(self):
try:
self.db['foo'] = {u'number': float('nan')}
self.fail('Expected ValueError')
except ValueError:
pass
def test_disallow_none_id(self):
deldoc = lambda: self.db.delete({'_id': None, '_rev': None})
self.assertRaises(ValueError, deldoc)
def test_doc_revs(self):
doc = {'bar': 42}
self.db['foo'] = doc
old_rev = doc['_rev']
doc['bar'] = 43
self.db['foo'] = doc
new_rev = doc['_rev']
new_doc = self.db.get('foo')
self.assertEqual(new_rev, new_doc['_rev'])
new_doc = self.db.get('foo', rev=new_rev)
self.assertEqual(new_rev, new_doc['_rev'])
old_doc = self.db.get('foo', rev=old_rev)
self.assertEqual(old_rev, old_doc['_rev'])
revs = [i for i in self.db.revisions('foo')]
self.assertEqual(revs[0]['_rev'], new_rev)
self.assertEqual(revs[1]['_rev'], old_rev)
gen = self.db.revisions('crap')
self.assertRaises(StopIteration, lambda: gen.next())
self.assertTrue(self.db.compact())
while self.db.info()['compact_running']:
pass
# 0.10 responds with 404, 0.9 responds with 500, same content
doc = 'fail'
try:
doc = self.db.get('foo', rev=old_rev)
except http.ServerError:
doc = None
assert doc is None
def test_attachment_crud(self):
doc = {'bar': 42}
self.db['foo'] = doc
old_rev = doc['_rev']
self.db.put_attachment(doc, 'Foo bar', 'foo.txt', 'text/plain')
self.assertNotEquals(old_rev, doc['_rev'])
doc = self.db['foo']
attachment = doc['_attachments']['foo.txt']
self.assertEqual(len('Foo bar'), attachment['length'])
self.assertEqual('text/plain', attachment['content_type'])
self.assertEqual('Foo bar',
self.db.get_attachment(doc, 'foo.txt').read())
self.assertEqual('Foo bar',
self.db.get_attachment('foo', 'foo.txt').read())
old_rev = doc['_rev']
self.db.delete_attachment(doc, 'foo.txt')
self.assertNotEquals(old_rev, doc['_rev'])
self.assertEqual(None, self.db['foo'].get('_attachments'))
def test_attachment_crud_with_files(self):
doc = {'bar': 42}
self.db['foo'] = doc
old_rev = doc['_rev']
fileobj = StringIO('Foo bar baz')
self.db.put_attachment(doc, fileobj, 'foo.txt')
self.assertNotEquals(old_rev, doc['_rev'])
doc = self.db['foo']
attachment = doc['_attachments']['foo.txt']
self.assertEqual(len('Foo bar baz'), attachment['length'])
self.assertEqual('text/plain', attachment['content_type'])
self.assertEqual('Foo bar baz',
self.db.get_attachment(doc, 'foo.txt').read())
self.assertEqual('Foo bar baz',
self.db.get_attachment('foo', 'foo.txt').read())
old_rev = doc['_rev']
self.db.delete_attachment(doc, 'foo.txt')
self.assertNotEquals(old_rev, doc['_rev'])
self.assertEqual(None, self.db['foo'].get('_attachments'))
def test_empty_attachment(self):
doc = {}
self.db['foo'] = doc
old_rev = doc['_rev']
self.db.put_attachment(doc, '', 'empty.txt')
self.assertNotEquals(old_rev, doc['_rev'])
doc = self.db['foo']
attachment = doc['_attachments']['empty.txt']
self.assertEqual(0, attachment['length'])
def test_default_attachment(self):
doc = {}
self.db['foo'] = doc
self.assertTrue(self.db.get_attachment(doc, 'missing.txt') is None)
sentinel = object()
self.assertTrue(self.db.get_attachment(doc, 'missing.txt', sentinel) is sentinel)
def test_attachment_from_fs(self):
tmpdir = tempfile.mkdtemp()
tmpfile = os.path.join(tmpdir, 'test.txt')
f = open(tmpfile, 'w')
f.write('Hello!')
f.close()
doc = {}
self.db['foo'] = doc
self.db.put_attachment(doc, open(tmpfile))
doc = self.db.get('foo')
self.assertTrue(doc['_attachments']['test.txt']['content_type'] == 'text/plain')
shutil.rmtree(tmpdir)
def test_attachment_no_filename(self):
doc = {}
self.db['foo'] = doc
self.assertRaises(ValueError, self.db.put_attachment, doc, '')
def test_json_attachment(self):
doc = {}
self.db['foo'] = doc
self.db.put_attachment(doc, '{}', 'test.json', 'application/json')
self.assertEquals(self.db.get_attachment(doc, 'test.json').read(), '{}')
def test_include_docs(self):
doc = {'foo': 42, 'bar': 40}
self.db['foo'] = doc
rows = list(self.db.query(
'function(doc) { emit(doc._id, null); }',
include_docs=True
))
self.assertEqual(1, len(rows))
self.assertEqual(doc, rows[0].doc)
def test_query_multi_get(self):
for i in range(1, 6):
self.db.save({'i': i})
res = list(self.db.query('function(doc) { emit(doc.i, null); }',
keys=range(1, 6, 2)))
self.assertEqual(3, len(res))
for idx, i in enumerate(range(1, 6, 2)):
self.assertEqual(i, res[idx].key)
def test_bulk_update_conflict(self):
docs = [
dict(type='Person', name='John Doe'),
dict(type='Person', name='Mary Jane'),
dict(type='City', name='Gotham City')
]
self.db.update(docs)
# update the first doc to provoke a conflict in the next bulk update
doc = docs[0].copy()
self.db[doc['_id']] = doc
results = self.db.update(docs)
self.assertEqual(False, results[0][0])
assert isinstance(results[0][2], http.ResourceConflict)
def test_bulk_update_all_or_nothing(self):
docs = [
dict(type='Person', name='John Doe'),
dict(type='Person', name='Mary Jane'),
dict(type='City', name='Gotham City')
]
self.db.update(docs)
# update the first doc to provoke a conflict in the next bulk update
doc = docs[0].copy()
doc['name'] = 'Jane Doe'
self.db[doc['_id']] = doc
results = self.db.update(docs, all_or_nothing=True)
self.assertEqual(True, results[0][0])
doc = self.db.get(doc['_id'], conflicts=True)
assert '_conflicts' in doc
revs = self.db.get(doc['_id'], open_revs='all')
assert len(revs) == 2
def test_bulk_update_bad_doc(self):
self.assertRaises(TypeError, self.db.update, [object()])
def test_copy_doc(self):
self.db['foo'] = {'status': 'testing'}
result = self.db.copy('foo', 'bar')
self.assertEqual(result, self.db['bar'].rev)
def test_copy_doc_conflict(self):
self.db['bar'] = {'status': 'idle'}
self.db['foo'] = {'status': 'testing'}
self.assertRaises(http.ResourceConflict, self.db.copy, 'foo', 'bar')
def test_copy_doc_overwrite(self):
self.db['bar'] = {'status': 'idle'}
self.db['foo'] = {'status': 'testing'}
result = self.db.copy('foo', self.db['bar'])
doc = self.db['bar']
self.assertEqual(result, doc.rev)
self.assertEqual('testing', doc['status'])
def test_copy_doc_srcobj(self):
self.db['foo'] = {'status': 'testing'}
self.db.copy(self.db['foo'], 'bar')
self.assertEqual('testing', self.db['bar']['status'])
def test_copy_doc_destobj_norev(self):
self.db['foo'] = {'status': 'testing'}
self.db.copy('foo', {'_id': 'bar'})
self.assertEqual('testing', self.db['bar']['status'])
def test_copy_doc_src_dictlike(self):
class DictLike(object):
def __init__(self, doc):
self.doc = doc
def items(self):
return self.doc.items()
self.db['foo'] = {'status': 'testing'}
self.db.copy(DictLike(self.db['foo']), 'bar')
self.assertEqual('testing', self.db['bar']['status'])
def test_copy_doc_dest_dictlike(self):
class DictLike(object):
def __init__(self, doc):
self.doc = doc
def items(self):
return self.doc.items()
self.db['foo'] = {'status': 'testing'}
self.db['bar'] = {}
self.db.copy('foo', DictLike(self.db['bar']))
self.assertEqual('testing', self.db['bar']['status'])
def test_copy_doc_src_baddoc(self):
self.assertRaises(TypeError, self.db.copy, object(), 'bar')
def test_copy_doc_dest_baddoc(self):
self.assertRaises(TypeError, self.db.copy, 'foo', object())
def test_changes(self):
self.db['foo'] = {'bar': True}
self.assertEqual(self.db.changes(since=0)['last_seq'], 1)
first = self.db.changes(feed='continuous').next()
self.assertEqual(first['seq'], 1)
self.assertEqual(first['id'], 'foo')
def test_changes_releases_conn(self):
# Consume an entire changes feed to read the whole response, then check
# that the HTTP connection made it to the pool.
list(self.db.changes(feed='continuous', timeout=0))
scheme, netloc = urlparse.urlsplit(client.DEFAULT_BASE_URL)[:2]
self.assertTrue(self.db.resource.session.connection_pool.conns[(scheme, netloc)])
def test_changes_releases_conn_when_lastseq(self):
# Consume a changes feed, stopping at the 'last_seq' item, i.e. don't
# let the generator run any further, then check the connection made it
# to the pool.
for obj in self.db.changes(feed='continuous', timeout=0):
if 'last_seq' in obj:
break
scheme, netloc = urlparse.urlsplit(client.DEFAULT_BASE_URL)[:2]
self.assertTrue(self.db.resource.session.connection_pool.conns[(scheme, netloc)])
def test_changes_conn_usable(self):
# Consume a changes feed to get a used connection in the pool.
list(self.db.changes(feed='continuous', timeout=0))
# Try using the connection again to make sure the connection was left
# in a good state from the previous request.
self.assertTrue(self.db.info()['doc_count'] == 0)
def test_changes_heartbeat(self):
def wakeup():
time.sleep(.3)
self.db.save({})
threading.Thread(target=wakeup).start()
for change in self.db.changes(feed='continuous', heartbeat=100):
break
def test_purge(self):
doc = {'a': 'b'}
self.db['foo'] = doc
self.assertEqual(self.db.purge([doc])['purge_seq'], 1)
def test_json_encoding_error(self):
doc = {'now': datetime.now()}
self.assertRaises(TypeError, self.db.save, doc)
class ViewTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
def test_row_object(self):
row = list(self.db.view('_all_docs', keys=['blah']))[0]
self.assertEqual(repr(row), "<Row key=u'blah', error=u'not_found'>")
self.assertEqual(row.id, None)
self.assertEqual(row.key, 'blah')
self.assertEqual(row.value, None)
self.assertEqual(row.error, 'not_found')
self.db.save({'_id': 'xyz', 'foo': 'bar'})
row = list(self.db.view('_all_docs', keys=['xyz']))[0]
self.assertEqual(row.id, 'xyz')
self.assertEqual(row.key, 'xyz')
self.assertEqual(row.value.keys(), ['rev'])
self.assertEqual(row.error, None)
def test_view_multi_get(self):
for i in range(1, 6):
self.db.save({'i': i})
self.db['_design/test'] = {
'language': 'javascript',
'views': {
'multi_key': {'map': 'function(doc) { emit(doc.i, null); }'}
}
}
res = list(self.db.view('test/multi_key', keys=range(1, 6, 2)))
self.assertEqual(3, len(res))
for idx, i in enumerate(range(1, 6, 2)):
self.assertEqual(i, res[idx].key)
def test_ddoc_info(self):
self.db['_design/test'] = {
'language': 'javascript',
'views': {
'test': {'map': 'function(doc) { emit(doc.type, null); }'}
}
}
info = self.db.info('test')
self.assertEqual(info['view_index']['compact_running'], False)
def test_view_compaction(self):
for i in range(1, 6):
self.db.save({'i': i})
self.db['_design/test'] = {
'language': 'javascript',
'views': {
'multi_key': {'map': 'function(doc) { emit(doc.i, null); }'}
}
}
self.db.view('test/multi_key')
self.assertTrue(self.db.compact('test'))
def test_view_cleanup(self):
for i in range(1, 6):
self.db.save({'i': i})
self.db['_design/test'] = {
'language': 'javascript',
'views': {
'multi_key': {'map': 'function(doc) { emit(doc.i, null); }'}
}
}
self.db.view('test/multi_key')
ddoc = self.db['_design/test']
ddoc['views'] = {
'ids': {'map': 'function(doc) { emit(doc._id, null); }'}
}
self.db.update([ddoc])
self.db.view('test/ids')
self.assertTrue(self.db.cleanup())
def test_view_function_objects(self):
if 'python' not in self.server.config()['query_servers']:
return
for i in range(1, 4):
self.db.save({'i': i, 'j':2*i})
def map_fun(doc):
yield doc['i'], doc['j']
res = list(self.db.query(map_fun, language='python'))
self.assertEqual(3, len(res))
for idx, i in enumerate(range(1,4)):
self.assertEqual(i, res[idx].key)
self.assertEqual(2*i, res[idx].value)
def reduce_fun(keys, values):
return sum(values)
res = list(self.db.query(map_fun, reduce_fun, 'python'))
self.assertEqual(1, len(res))
self.assertEqual(12, res[0].value)
def test_init_with_resource(self):
self.db['foo'] = {}
view = client.PermanentView(self.db.resource('_all_docs').url, '_all_docs')
self.assertEquals(len(list(view())), 1)
def test_iter_view(self):
self.db['foo'] = {}
view = client.PermanentView(self.db.resource('_all_docs').url, '_all_docs')
self.assertEquals(len(list(view)), 1)
def test_tmpview_repr(self):
mapfunc = "function(doc) {emit(null, null);}"
view = client.TemporaryView(self.db.resource('_temp_view'), mapfunc)
self.assertTrue('TemporaryView' in repr(view))
self.assertTrue(mapfunc in repr(view))
def test_wrapper_iter(self):
class Wrapper(object):
def __init__(self, doc):
pass
self.db['foo'] = {}
self.assertTrue(isinstance(list(self.db.view('_all_docs', wrapper=Wrapper))[0], Wrapper))
def test_wrapper_rows(self):
class Wrapper(object):
def __init__(self, doc):
pass
self.db['foo'] = {}
self.assertTrue(isinstance(self.db.view('_all_docs', wrapper=Wrapper).rows[0], Wrapper))
def test_properties(self):
for attr in ['rows', 'total_rows', 'offset']:
self.assertTrue(getattr(self.db.view('_all_docs'), attr) is not None)
def test_rowrepr(self):
self.db['foo'] = {}
rows = list(self.db.query("function(doc) {emit(null, 1);}"))
self.assertTrue('Row' in repr(rows[0]))
self.assertTrue('id' in repr(rows[0]))
rows = list(self.db.query("function(doc) {emit(null, 1);}", "function(keys, values, combine) {return sum(values);}"))
self.assertTrue('Row' in repr(rows[0]))
self.assertTrue('id' not in repr(rows[0]))
class ShowListTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
show_func = """
function(doc, req) {
return {"body": req.id + ":" + (req.query.r || "<default>")};
}
"""
list_func = """
function(head, req) {
start({headers: {'Content-Type': 'text/csv'}});
if (req.query.include_header) {
send('id' + '\\r\\n');
}
var row;
while (row = getRow()) {
send(row.id + '\\r\\n');
}
}
"""
design_doc = {'_id': '_design/foo',
'shows': {'bar': show_func},
'views': {'by_id': {'map': "function(doc) {emit(doc._id, null)}"},
'by_name': {'map': "function(doc) {emit(doc.name, null)}"}},
'lists': {'list': list_func}}
def setUp(self):
super(ShowListTestCase, self).setUp()
# Workaround for possible bug in CouchDB. Adding a timestamp avoids a
# 409 Conflict error when pushing the same design doc that existed in a
# now deleted database.
design_doc = dict(self.design_doc)
design_doc['timestamp'] = time.time()
self.db.save(design_doc)
self.db.update([{'_id': '1', 'name': 'one'}, {'_id': '2', 'name': 'two'}])
def test_show_urls(self):
self.assertEqual(self.db.show('_design/foo/_show/bar')[1].read(), 'null:<default>')
self.assertEqual(self.db.show('foo/bar')[1].read(), 'null:<default>')
def test_show_docid(self):
self.assertEqual(self.db.show('foo/bar')[1].read(), 'null:<default>')
self.assertEqual(self.db.show('foo/bar', '1')[1].read(), '1:<default>')
self.assertEqual(self.db.show('foo/bar', '2')[1].read(), '2:<default>')
def test_show_params(self):
self.assertEqual(self.db.show('foo/bar', r='abc')[1].read(), 'null:abc')
def test_list(self):
self.assertEqual(self.db.list('foo/list', 'foo/by_id')[1].read(), '1\r\n2\r\n')
self.assertEqual(self.db.list('foo/list', 'foo/by_id', include_header='true')[1].read(), 'id\r\n1\r\n2\r\n')
def test_list_keys(self):
self.assertEqual(self.db.list('foo/list', 'foo/by_id', keys=['1'])[1].read(), '1\r\n')
def test_list_view_params(self):
self.assertEqual(self.db.list('foo/list', 'foo/by_name', startkey='o', endkey='p')[1].read(), '1\r\n')
self.assertEqual(self.db.list('foo/list', 'foo/by_name', descending=True)[1].read(), '2\r\n1\r\n')
class UpdateHandlerTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
update_func = """
function(doc, req) {
if (!doc) {
if (req.id) {
return [{_id : req.id}, "new doc"]
}
return [null, "empty doc"];
}
doc.name = "hello";
return [doc, "hello doc"];
}
"""
design_doc = {'_id': '_design/foo',
'language': 'javascript',
'updates': {'bar': update_func}}
def setUp(self):
super(UpdateHandlerTestCase, self).setUp()
# Workaround for possible bug in CouchDB. Adding a timestamp avoids a
# 409 Conflict error when pushing the same design doc that existed in a
# now deleted database.
design_doc = dict(self.design_doc)
design_doc['timestamp'] = time.time()
self.db.save(design_doc)
self.db.update([{'_id': 'existed', 'name': 'bar'}])
def test_empty_doc(self):
self.assertEqual(self.db.update_doc('foo/bar')[1].read(), 'empty doc')
def test_new_doc(self):
self.assertEqual(self.db.update_doc('foo/bar', 'new')[1].read(), 'new doc')
def test_update_doc(self):
self.assertEqual(self.db.update_doc('foo/bar', 'existed')[1].read(), 'hello doc')
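# Aggregate every test case, plus the doctests embedded in couchdb.client, into
# a single suite.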
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ServerTestCase, 'test'))
suite.addTest(unittest.makeSuite(DatabaseTestCase, 'test'))
suite.addTest(unittest.makeSuite(ViewTestCase, 'test'))
suite.addTest(unittest.makeSuite(ShowListTestCase, 'test'))
suite.addTest(unittest.makeSuite(UpdateHandlerTestCase, 'test'))
suite.addTest(doctest.DocTestSuite(client))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| ryanolson/couchdb-python | couchdb/tests/client.py | Python | bsd-3-clause | 26,584 |
#!/usr/bin/env python
"""
heatsequer plot window gui (2nd plot window) module
imported from plotwin.py when you plotexp() and set usegui=True
"""
# amnonscript
__version__ = "0.91"
import heatsequer as hs
import os
import sys
import numpy as np
import matplotlib as mpl
mpl.use('Qt5Agg')
import matplotlib.pyplot as plt
from matplotlib.pyplot import *
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from PyQt5 import QtGui, QtCore, QtWidgets, uic
from PyQt5.QtCore import Qt
#from PyQt4 import QtGui
from PyQt5.QtWidgets import QCompleter,QMessageBox,QListWidgetItem
from PyQt5.QtCore import QStringListModel
import pickle
# for debugging - use XXX()
from pdb import set_trace as XXX
""""
for the GUI
"""
class SListWindow(QtWidgets.QDialog):
def __init__(self,listdata=[],listname=''):
"""
create a list window with items in the list and the listname as specified
input:
listdata - the data to show in the list (a list)
listname - name to display above the list
"""
super(SListWindow, self).__init__()
# uic.loadUi('./ui/listwindow.py', self)
uic.loadUi(os.path.join(hs.heatsequerdir,'ui/listwindow.py'), self)
# uic.loadUi(hs.get_data_path('listwindow.py','ui'), self)
for citem in listdata:
self.lList.addItem(citem)
if listname:
self.lLabel.setText(listname)
class MyMplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None, width=5, height=4, dpi=100):
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.fig.add_subplot(111)
# We want the axes cleared every time plot() is called
# self.axes.hold(False)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,QtWidgets.QSizePolicy.Expanding,QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
class PlotGUIWindow(QtWidgets.QDialog):
cexp=[]
def __init__(self,expdat):
super(PlotGUIWindow, self).__init__()
hs.Debug(1,hs.get_data_path('plotguiwindow.py','ui'))
uic.loadUi(os.path.join(hs.heatsequerdir,'ui/plotguiwindow.py'), self)
self.bGetSequence.clicked.connect(self.getsequence)
self.bExport.clicked.connect(self.export)
self.bView.clicked.connect(self.view)
self.bSave.clicked.connect(self.save)
self.bDBSave.clicked.connect(self.dbsave)
self.bEnrich.clicked.connect(self.enrich)
self.bExpInfo.clicked.connect(self.expinfo)
self.bSampleInfo.clicked.connect(self.sampleinfo)
self.lCoolDB.doubleClicked.connect(self.showannotation)
self.cSampleField.activated.connect(self.samplefield)
self.FigureTab.currentChanged.connect(self.tabchange)
self.cSampleField.setCurrentIndex(0)
self.cexp=expdat
self.selectionlines={}
self.selection=[]
self.setWindowTitle(self.cexp.studyname)
for cfield in self.cexp.fields:
self.cSampleField.addItem(cfield)
self.cPlotXField.addItem(cfield)
if self.cexp.seqdb:
ontofields,ontonames=hs.bactdb.getontonames(self.cexp.seqdb)
for conto in ontofields:
# for conto in self.cexp.seqdb.OntoGraph.keys():
self.cOntology.addItem(conto)
self.dc=None
self.createaddplot(useqt=True)
# right click menu
self.lCoolDB.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.lCoolDB.customContextMenuRequested.connect(self.listItemRightClicked)
def listItemRightClicked(self, QPos):
self.listMenu= QtWidgets.QMenu()
menuitem = self.listMenu.addAction("Delete annotation")
menuitem.triggered.connect(self.menuDeleteAnnotation)
parentPosition = self.lCoolDB.mapToGlobal(QtCore.QPoint(0, 0))
self.listMenu.move(parentPosition + QPos)
self.listMenu.show()
def menuDeleteAnnotation(self):
if len(self.lCoolDB.selectedItems())>1:
print('more than 1 item')
for citem in self.lCoolDB.selectedItems():
cdetails=citem.data(Qt.UserRole)
if cdetails is None:
print('no details')
return
annotationid=cdetails['annotationid']
qres=QtWidgets.QMessageBox.warning(self,"Delete annotation","Are you sure you want to delete annotation %d?\nThis cannot be undone" % annotationid,QtWidgets.QMessageBox.Yes,QtWidgets.QMessageBox.Cancel)
if qres==QtWidgets.QMessageBox.Cancel:
return
hs.supercooldb.delete_annotation(hs.scdb,annotationid)
def showannotation(self):
citem=self.lCoolDB.currentItem()
cdetails=citem.data(Qt.UserRole)
print('-----')
print(cdetails)
showannotationdata(cdetails)
def createaddplot(self,useqt=True):
"""
create the additional figure for the ontology/line plots
input:
useqt : boolean
True to embed the plot in the qtgui window, false to open a new figure window (so don't need the qtagg)
"""
if useqt:
# add the matplotlib figure
self.frame = QtWidgets.QWidget(self)
self.dc = MyMplCanvas(self.frame, width=5, height=4, dpi=100)
# add it to an hboxlayout to make it resize with window
layout = QtWidgets.QHBoxLayout(self)
layout.insertSpacing(0,250)
# layout.addWidget(self.dc)
# self.setLayout(layout)
layout2 = QtWidgets.QVBoxLayout()
layout.addLayout(layout2)
layout2.addWidget(self.dc)
self.mpl_toolbar = NavigationToolbar(self.dc, self)
layout2.addWidget(self.mpl_toolbar)
self.setLayout(layout)
else:
addfig=plt.figure()
addax=addfig.add_subplot(1,1,1)
# addax.hold(False)
self.dc=addax
def sampleinfo(self):
if not self.csamp:
return
csamp=self.cexp.samples[self.csamp]
cmap=self.cexp.smap[csamp]
info=[]
for k,v in cmap.items():
info.append(k+':'+v)
slistwin = SListWindow(info,cmap['#SampleID'])
slistwin.exec_()
def tabchange(self,newtab):
hs.Debug(0,"new tab",newtab)
if newtab==2:
self.plotxgraph()
if newtab==1:
self.plotontology()
def plotontology(self):
if self.cexp.seqdb:
self.dc.axes.clear()
hs.Debug(2,"plotting taxonomy for seq %s onto %s" % (self.cexp.seqs[self.cseq],self.cexp.ontofigname))
hs.bactdb.PlotOntologyGraph(self.cexp.seqdb,self.cexp.seqs[self.cseq],field=str(self.cOntology.currentText()),toax=self.dc.axes)
self.dc.draw()
def plotxgraph(self):
if self.dc is None:
self.createaddplot()
self.dc.axes.clear()
seqs=self.getselectedseqs()
if self.cPlotNormalizeY.checkState()==0:
normalizey=False
else:
normalizey=True
if self.cPlotXNumeric.checkState()==0:
xfield=False
else:
xfield=str(self.cPlotXField.currentText())
hs.plotseqfreq(self.cexp,seqs=seqs,toaxis=self.dc.axes,normalizey=normalizey,xfield=xfield)
# is this needed?
# self.dc.draw()
self.dc.figure.canvas.draw_idle()
def samplefield(self,qstr):
cfield=str(qstr)
self.lSampleFieldVal.setText(self.cexp.smap[self.cexp.samples[self.csamp]][cfield])
def getsequence(self):
seq=self.cexp.seqs[self.cseq]
val,ok=QtWidgets.QInputDialog.getText(self,'Sequence',self.cexp.tax[self.cseq],text=seq)
def view(self):
slist=[]
for cseq in self.selection:
slist.append(self.cexp.tax[cseq]+'-'+str(self.cexp.sids[cseq]))
val,ok=QtWidgets.QInputDialog.getItem(self,'Selected bacteria','',slist)
def getselectedseqs(self):
slist=[]
for cseq in self.selection:
slist.append(self.cexp.seqs[cseq])
return slist
def dbsave(self):
"""
save the selected list to the coolseq database
"""
val,ok=QtWidgets.QInputDialog.getText(self,'Save %d bacteria to coolseqDB' % len(self.selection),'Enter description')
hs.Debug(1,ok)
if ok:
seqs=[]
for cid in self.selection:
seqs.append(self.cexp.seqs[cid])
hs.cooldb.savecoolseqs(self.cexp,self.cexp.cdb,seqs,val)
def enrich(self):
"""
check for annotation enrichment for selected sequences (compared to other sequences in this experiment)
"""
if not self.cexp.cdb:
hs.Debug(8,'No cooldb loaded')
return
selseqs=[]
for cid in self.selection:
selseqs.append(self.cexp.seqs[cid])
bmd=hs.cooldb.testenrichment(self.cexp.cdb,self.cexp.seqs,selseqs)
# bmd=hs.annotationenrichment(self.cexp,selseqs)
hs.Debug(6,'found %d items' % len(bmd))
if len(bmd)>0:
slistwin = SListWindow(listname='Enrichment')
bmd=hs.sortenrichment(bmd)
for cbmd in bmd:
if cbmd['observed']<cbmd['expected']:
ccolor=QtGui.QColor(155,0,0)
else:
ccolor=QtGui.QColor(0,155,0)
item = QtWidgets.QListWidgetItem()
item.setText("%s (p:%f o:%d e:%f)" % (cbmd['description'],cbmd['pval'],cbmd['observed'],cbmd['expected']))
item.setForeground(ccolor)
slistwin.lList.addItem(item)
print("%s (p:%f o:%d e:%f)" % (cbmd['description'],cbmd['pval'],cbmd['observed'],cbmd['expected']))
slistwin.exec_()
def save(self):
"""
save the selected list to a fasta file
"""
fname = str(QtWidgets.QFileDialog.getSaveFileName(self, 'Save selection fasta file name','pita'))
slist=[]
for cseq in self.selection:
slist.append(self.cexp.seqs[cseq])
hs.saveseqsfasta(self.cexp,slist,fname)
hs.Debug(6,'Saved %d sequences to file %s' % (len(slist),fname))
def export(self):
"""
export the selected bacteria list to the global variable 'selectlist'
"""
global selectlist
hs.Debug(0,'exporting')
selectlist=[]
for cseq in self.selection:
selectlist.append(self.cexp.seqs[cseq])
def updateinfo(self,csamp,cseq):
"""
update the information about the sample/bacteria
"""
self.csamp=csamp
self.cseq=cseq
self.lSample.setText(self.cexp.samples[self.csamp])
self.lTaxonomy.setText(self.cexp.tax[self.cseq])
self.lID.setText(str(self.cexp.sids[self.cseq]))
self.lReads.setText('%f' % (float(self.cexp.data[self.cseq,self.csamp])/100))
self.lSampleFieldVal.setText(self.cexp.smap[self.cexp.samples[self.csamp]][str(self.cSampleField.currentText())])
# update the stats about the database:
if self.cexp.seqdb:
self.lStudies.clear()
totappear,numstudies,allstudies,studysamples,totdbsamples=hs.bactdb.GetSeqInfo(self.cexp.seqdb,self.cexp.seqs[self.cseq])
if totappear>0:
self.lNumSamples.setText(str('%d/%dK' % (totappear,int(totdbsamples/1000))))
self.lNumStudies.setText(str(numstudies))
res=list(studysamples.items())
vlens=[]
for cv in res:
totsamps=hs.bactdb.SamplesInStudy(self.cexp.seqdb,cv[0])
vlens.append(float(len(cv[1]))/len(totsamps))
sv,si=hs.isort(vlens,reverse=True)
for cind in si:
studyname=hs.bactdb.StudyNameFromID(self.cexp.seqdb,res[cind][0])
self.lStudies.addItem('%s (%f)' % (studyname,vlens[cind]))
else:
self.lNumSamples.setText(str('%d/%dK' % (0,int(totdbsamples/1000))))
self.lNumStudies.setText("0")
if self.FigureTab.currentIndex()==2:
self.plotxgraph()
if self.FigureTab.currentIndex()==1:
self.plotontology()
def updatecdb(self,info):
"""
update the coolseq database info for the bacteria
by adding all lines in list to the listbox
"""
self.lCoolDB.clear()
self.addtocdblist(info)
def addtocdblist(self,info):
"""
add to cdb list without clearing
"""
for cinfo in info:
# test if the supercooldb annotation
if type(cinfo)==tuple:
details=cinfo[0]
newitem=QListWidgetItem(cinfo[1])
newitem.setData(Qt.UserRole,details)
if details['annotationtype']=='diffexp':
ccolor=QtGui.QColor(0,0,200)
elif details['annotationtype']=='contamination':
ccolor=QtGui.QColor(200,0,0)
elif details['annotationtype']=='common':
ccolor=QtGui.QColor(0,200,0)
elif details['annotationtype']=='highfreq':
ccolor=QtGui.QColor(0,200,0)
else:
ccolor=QtGui.QColor(0,0,0)
newitem.setForeground(ccolor)
self.lCoolDB.addItem(newitem)
else:
self.lCoolDB.addItem(cinfo)
def selectbact(self,bactlist,flip=True):
"""
add bacteria from the list bactlist (position in exp) to the selection
flip - if true, if bacteria from list is already in selection, remove it
"""
for cseq in bactlist:
# if already in list and can flip, remove from list instead
if flip:
if cseq in self.selectionlines:
self.clearselection([cseq])
hs.Debug(0,'Flip')
continue
if cseq in self.selectionlines:
continue
cline=self.plotax.plot([-0.5,len(self.cexp.samples)-0.5],[cseq,cseq],':w')
self.selectionlines[cseq]=cline
self.selection.append(cseq)
self.plotfig.canvas.draw()
self.lSelection.setText('%d bacteria' % len(self.selection))
def clearselection(self,seqlist=False):
if not seqlist:
seqlist=list(self.selectionlines.keys())
for cseq in seqlist:
cline=self.selectionlines[cseq]
self.plotax.lines.remove(cline[0])
del self.selectionlines[cseq]
self.selection.remove(cseq)
self.plotfig.canvas.draw()
self.lSelection.setText('%d bacteria' % len(self.selection))
def expinfo(self):
# get the selected sequences
sequences=[]
for cid in self.selection:
sequences.append(self.cexp.seqs[cid])
self.cexp.selectedseqs=sequences
dbs = DBAnnotateSave(self.cexp)
res=dbs.exec_()
if res==QtWidgets.QDialog.Accepted:
# fl=open('/Users/amnon/Python/git/heatsequer/db/ontologyfromid.pickle','rb')
# ontologyfromid=pickle.load(fl)
# fl.close()
ontologyfromid=hs.scdb.ontologyfromid
description=str(dbs.bdescription.text())
# TODO: need to get primer region!!!!
primerid='V4'
method=str(dbs.bmethod.text())
if method=='':
method='na'
submittername='Amnon Amir'
curations=[]
# if it is differential abundance
for citem in qtlistiteritems(dbs.blistall):
cdat=qtlistgetdata(citem)
cval=cdat['value']
ctype=cdat['type']
if cval in ontologyfromid:
cval=ontologyfromid[cval]
else:
hs.Debug(1,"item %s not found in ontologyfromid" % cval)
curations.append((ctype,cval))
if dbs.bdiffpres.isChecked():
curtype='DIFFEXP'
elif dbs.bisa.isChecked():
curtypeval=dbs.bisatype.currentText()
if 'Common' in curtypeval:
curtype='COMMON'
elif 'Contam' in curtypeval:
curtype='CONTAMINATION'
elif 'High' in curtypeval:
curtype='HIGHFREQ'
else:
curtype='OTHER'
else:
hs.Debug(9,"No annotation type selected")
return
scdb=hs.scdb
cdata=hs.supercooldb.finddataid(scdb,datamd5=self.cexp.datamd5,mapmd5=self.cexp.mapmd5)
# if study not in database, ask to add some metadata for it
if cdata is None:
okcontinue=False
while not okcontinue:
hs.Debug(6,'study data info not found based on datamd5, mapmd5. need to add one!!!')
qres=QtWidgets.QMessageBox.warning(self,"No study data","No information added about study data. Add info?",QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No,QtWidgets.QMessageBox.Cancel)
if qres==QtWidgets.QMessageBox.Cancel:
return
if qres==QtWidgets.QMessageBox.No:
cdata=hs.supercooldb.addexpdata(scdb,( ('DataMD5',self.cexp.datamd5), ('MapMD5',self.cexp.mapmd5) ) )
okcontinue=True
if qres==QtWidgets.QMessageBox.Yes:
okcontinue=getstudydata(self.cexp)
cdata=hs.supercooldb.finddataid(scdb,datamd5=self.cexp.datamd5,mapmd5=self.cexp.mapmd5)
hs.Debug(1,'new cdata is %s' % cdata)
hs.Debug(6,'Data found. id is %s' % cdata)
hs.supercooldb.addannotations(scdb,expid=cdata,sequences=sequences,annotationtype=curtype,annotations=curations,submittername=submittername,description=description,method=method,primerid=primerid)
# store the history
try:
hs.lastcurations.append(curations)
except:
hs.lastcurations=[curations]
hs.lastdatamd5=self.cexp.datamd5
class DBStudyAnnotations(QtWidgets.QDialog):
def __init__(self,studyid):
super(DBStudyAnnotations, self).__init__()
uic.loadUi(os.path.join(hs.heatsequerdir,'ui/annotationlist.py'), self)
scdb=hs.scdb
self.scdb=scdb
self.studyid=studyid
info=hs.supercooldb.getexpannotations(scdb,studyid)
for cinfo in info:
self.blist.addItem(cinfo)
self.bdetails.clicked.connect(self.details)
def details(self):
items=self.blist.selectedItems()
if len(items)==0:
return
print(str(items[0].text()))
class DBStudyInfo(QtWidgets.QDialog):
def __init__(self,expdat):
super(DBStudyInfo, self).__init__()
uic.loadUi(os.path.join(hs.heatsequerdir,'ui/studyinfo.py'), self)
scdb=hs.scdb
self.scdb=scdb
self.dataid=0
dataid=hs.supercooldb.finddataid(scdb,datamd5=expdat.datamd5,mapmd5=expdat.mapmd5)
if dataid is not None:
info=hs.supercooldb.getexperimentinfo(scdb,dataid)
for cinfo in info:
qtlistadd(self.blist,cinfo[0]+':'+cinfo[1],{'fromdb':True,'type':cinfo[0],'value':cinfo[1]},color='grey')
self.dataid=dataid
else:
qtlistadd(self.blist,"DataMD5:%s" % expdat.datamd5,{'fromdb':False,'type':"DataMD5",'value':expdat.datamd5},color='black')
qtlistadd(self.blist,"MapMD5:%s" % expdat.mapmd5,{'fromdb':False,'type':"MapMD5",'value':expdat.mapmd5},color='black')
self.bplus.clicked.connect(self.plus)
self.bvalue.returnPressed.connect(self.plus)
self.bminus.clicked.connect(self.minus)
self.bannotations.clicked.connect(self.annotations)
self.cexp=expdat
self.setWindowTitle(self.cexp.studyname)
self.prepstudyinfo()
self.bvalue.setFocus()
def keyPressEvent(self, e):
"""
override the enter event so will not close dialog
"""
e.ignore()
def addentry(self,fromdb,ctype,value,color='black'):
if len(ctype)>0 and len(value)>0:
newentry='%s:%s' % (ctype,value)
for citem in getqtlistitems(self.blist):
if citem==newentry:
hs.Debug(2,'item already in list %s' % newentry)
return
qtlistadd(self.blist,newentry,{'fromdb':False,'type':ctype,'value':value},color="black")
def plus(self):
ctype=str(self.btype.currentText())
cval=str(self.bvalue.text())
self.addentry(fromdb=False,ctype=ctype,value=cval,color='black')
self.bvalue.setText('')
def minus(self):
items=self.blist.selectedItems()
for citem in items:
cdata=qtlistgetdata(citem)
if cdata['fromdb']:
print('delete from db')
self.blist.takeItem(self.blist.row(citem))
def annotations(self):
dbsa = DBStudyAnnotations(self.dataid)
dbsa.exec_()
def prepstudyinfo(self):
"""
add the study info from the mapping file if available
"""
fieldlist=[('SRA_Study_s','sra'),('project_name_s','name'),('experiment_title','name'),('experiment_design_description','name'),('BioProject_s','sra')]
cexp=self.cexp
for (cfield,infofield) in fieldlist:
if cfield in cexp.fields:
uvals=hs.getfieldvals(cexp,cfield,ounique=True)
if len(uvals)==1:
self.addentry(fromdb=False,ctype=infofield,value=uvals[0].lower(),color='black')
class DBAnnotateSave(QtWidgets.QDialog):
def __init__(self,expdat):
super(DBAnnotateSave, self).__init__()
print("DBAnnotateSave")
uic.loadUi(os.path.join(hs.heatsequerdir,'ui/manualdata.py'), self)
self.bplus.clicked.connect(self.plus)
self.bminus.clicked.connect(self.minus)
self.bontoinput.returnPressed.connect(self.plus)
self.bstudyinfo.clicked.connect(self.studyinfo)
self.bisa.toggled.connect(self.radiotoggle)
self.bdiffpres.toggled.connect(self.radiotoggle)
self.bisatype.currentIndexChanged.connect(self.isatypechanged)
self.bhistory.clicked.connect(self.history)
self.cexp=expdat
self.lnumbact.setText(str(len(expdat.selectedseqs)))
completer = QCompleter()
self.bontoinput.setCompleter(completer)
scdb=hs.scdb
self.scdb=scdb
self.dataid=hs.supercooldb.finddataid(scdb,datamd5=self.cexp.datamd5,mapmd5=self.cexp.mapmd5)
model = QStringListModel()
completer.setModel(model)
# completer.setCompletionMode(QCompleter.InlineCompletion)
completer.maxVisibleItems=10
completer.setCaseSensitivity(Qt.CaseInsensitive)
# make the completer selection also erase the text edit
completer.activated.connect(self.cleartext,type=Qt.QueuedConnection)
# in qt5 should work with middle complete as well...
# completer.setFilterMode(Qt.MatchContains)
if not hs.scdb.ontologyfromid:
hs.scdb=hs.supercooldb.loaddbonto(hs.scdb)
self.ontology=hs.scdb.ontology
self.ontologyfromid=hs.scdb.ontologyfromid
nlist=list(self.ontology.keys())
# nlist=sorted(nlist)
nlist=sorted(nlist, key=lambda s: s.lower())
print("sorted ontology")
model.setStringList(nlist)
self.setWindowTitle(self.cexp.studyname)
try:
tt=hs.lastdatamd5
except:
hs.lastdatamd5=''
if self.cexp.datamd5==hs.lastdatamd5:
self.fillfromcuration(hs.lastcurations[-1],onlyall=True)
self.prefillinfo()
self.bontoinput.setFocus()
def history(self):
curtext=[]
for cur in hs.lastcurations:
ct=''
for dat in cur:
ct+=dat[0]+'-'+dat[1]+','
curtext.append(ct)
slistwin = SListWindow(curtext,'select curation from history')
res=slistwin.exec_()
if res:
items=slistwin.lList.selectedItems()
for citem in items:
print(citem)
spos=slistwin.lList.row(citem)
print(spos)
self.fillfromcuration(hs.lastcurations[spos],onlyall=False)
def fillfromcuration(self,curation,onlyall=True,clearit=True):
"""
fill gui list from curation
input:
curation : from hs.lastcurations
onlyall : bool
True to show only curations which have ALL, False to show also HIGH/LOW
clearit : bool
True to remove previous curations from list, False to keep
"""
if clearit:
self.blistall.clear()
for cdat in curation:
if onlyall:
if cdat[0]!='ALL':
continue
self.addtolist(cdat[0],cdat[1])
def radiotoggle(self):
if self.bisa.isChecked():
self.blow.setDisabled(True)
self.bhigh.setDisabled(True)
if self.bdiffpres.isChecked():
self.blow.setEnabled(True)
self.bhigh.setEnabled(True)
def isatypechanged(self):
"""
changed the selection of isatype combobox so need to activate the isa radio button
"""
self.bisa.setChecked(True)
def studyinfo(self):
getstudydata(self.cexp)
def keyPressEvent(self, e):
"""
override the enter event so will not close dialog
"""
# print(e.key())
e.ignore()
def minus(self):
"""
delete selected item from current list
"""
items=self.blistall.selectedItems()
for citem in items:
self.blistall.takeItem(self.blistall.row(citem))
def cleartext(self):
self.bontoinput.setText('')
def plus(self):
conto=str(self.bontoinput.text())
cgroup=self.getontogroup()
self.addtolist(cgroup,conto)
self.cleartext()
def addtolist(self,cgroup,conto):
"""
add an ontology term to the list
input:
cgroup : str
the group (i.e. 'low/high/all')
conto : str
the ontology term to add
"""
if conto=='':
hs.Debug(2,'no string to add to list')
return
print('addtolist %s %s' % (cgroup,conto))
if conto in self.ontology:
conto=self.ontologyfromid[self.ontology[conto]]
else:
hs.Debug(1,'Not in ontology!!!')
# TODO: add are you sure... not in ontology list....
# if item already in list, don't do anything
for citem in qtlistiteritems(self.blistall):
cdata=qtlistgetdata(citem)
if cdata['value']==conto:
hs.Debug(2,'item already in list')
return
if cgroup=='LOW':
ctext="LOW:%s" % conto
qtlistadd(self.blistall,ctext, {'type':'LOW','value':conto},color='red')
if cgroup=='HIGH':
ctext="HIGH:%s" % conto
qtlistadd(self.blistall,ctext, {'type':'HIGH','value':conto},color='blue')
if cgroup=='ALL':
ctext="ALL:%s" % conto
qtlistadd(self.blistall,ctext, {'type':'ALL','value':conto},color='black')
def getontogroup(self):
if self.ball.isChecked():
return('ALL')
if self.blow.isChecked():
return('LOW')
if self.bhigh.isChecked():
return('HIGH')
def prefillinfo(self):
"""
prefill "ALL" data fields based on mapping file
if all samples have same info
"""
hs.Debug(1,'prefill info')
ontologyfromid=self.ontologyfromid
# fl=open('/Users/amnon/Python/git/heatsequer/db/ncbitaxontofromid.pickle','rb')
fl=open(os.path.join(hs.heatsequerdir,'db/ncbitaxontofromid.pickle'),'rb')
ncbitax=pickle.load(fl)
fl.close()
cexp=self.cexp
for cfield in cexp.fields:
uvals=[]
if cfield in cexp.fields:
uvals=hs.getfieldvals(cexp,cfield,ounique=True)
# if we have 1 value
if len(uvals)==1:
cval=uvals[0]
hs.Debug(1,'found 1 value %s' % cval)
if cfield=='HOST_TAXID' or cfield=='host_taxid':
hs.Debug(2,'%s field has 1 value %s' % (cfield,cval))
# if ncbi taxonomy (field used differently)
cval='NCBITaxon:'+cval
if cval in ncbitax:
hs.Debug(2,'found in ncbitax %s' % cval)
cval=ncbitax[cval]
else:
# get the XXX from ENVO:XXX value
uvalspl=cval.split(':',1)
if len(uvalspl)>1:
cval=uvalspl[1]+' :'+uvalspl[0]
if cval in self.ontology:
cval=ontologyfromid[self.ontology[cval]]
hs.Debug(2,'term %s found in ontologyfromid' % cval)
conto=cval
hs.Debug(1,'add prefill %s' % conto)
self.addtolist('ALL',conto)
else:
hs.Debug(3,'term %s NOT found in ontologyfromid' % uvals[0])
else:
hs.Debug(1,'found %d values' % len(uvals))
def getqtlistitems(qtlist):
"""
get a list of strings of the qtlist
input:
qtlist : QTListWidget
output:
item : list of str
"""
items = []
for index in range(qtlist.count()):
items.append(str(qtlist.item(index).text()))
return items
def qtlistadd(qtlist,text,data,color="black"):
"""
	Add an entry (text) to qtlist and associate the metadata 'data' with it
input:
qtlist : QTListWidget
text : str
string to add to list
data : arbitrary python var
the data to associate with the item (get it by qtlistgetdata)
color : (R,G,B)
the color of the text in the list
"""
item = QtWidgets.QListWidgetItem()
item.setText(text)
ccol=QtGui.QColor()
ccol.setNamedColor(color)
item.setForeground(ccol)
item.setData(Qt.UserRole,data)
qtlist.addItem(item)
def qtlistgetdata(item):
"""
	Get the metadata associated with a list item (stored there by qtlistadd)
	input:
	item : QtListWidgetItem
the item to get the info about
output:
data : arbitrary
the data associated with the item (using qtlistadd)
"""
# item=qtlist.item(index)
if sys.version_info[0] < 3:
# QVariant version 1 API (python2 default)
data=item.data(Qt.UserRole).toPyObject()
else:
# QVariant version 2 API (python3 default)
data=item.data(Qt.UserRole)
return data
def qtlistiteritems(qtlist):
"""
iterate all items in a list
input:
qtlist : QtListWidget
"""
for i in range(qtlist.count()):
yield qtlist.item(i)
def getstudydata(cexp):
"""
open the study info window and show/get new references for the study data
input:
cexp : Experiment
the experiment for which to show the data (uses the datamd5 and mapmd5)
output:
hasdata : Bool
True if the study has data, False if not
"""
dbsi = DBStudyInfo(cexp)
res=dbsi.exec_()
if res==QtWidgets.QDialog.Accepted:
newstudydata=[]
allstudydata=[]
for citem in qtlistiteritems(dbsi.blist):
cdata=qtlistgetdata(citem)
allstudydata.append( (cdata['type'],cdata['value']) )
if cdata['fromdb']==False:
newstudydata.append( (cdata['type'],cdata['value']) )
if len(newstudydata)==0:
hs.Debug(6,'No new items. not saving anything')
return True
# look if study already in table
cid=hs.supercooldb.finddataid(dbsi.scdb,datamd5=cexp.datamd5,mapmd5=cexp.mapmd5)
if cid is None:
hs.Debug(6,'no studyid found for datamd5 %s, mapmd5 %s' % (cexp.datamd5,cexp.mapmd5))
# cdata=hs.supercooldb.addexpdata(scdb,( ('DataMD5',cexp.datamd5), ('MapMD5',cexp.mapmd5) ) )
hs.Debug(3,'Adding to new experiment')
dataid=hs.supercooldb.addexpdata(dbsi.scdb,newstudydata,studyid=cid)
hs.Debug(6,'Study data saved to id %d' % dataid)
if len(allstudydata)>2:
return True
return False
def showannotationdata(annotationdetails):
"""
show the list of annotation details and the sequences associated with it
	input:
	annotationdetails : dict
		dict of various fields of the annotation (including annotationid)
		from scdb.getannotationstrings()
	cexp : experiment
		the experiment (for the scdb pointer)
"""
info=[]
if annotationdetails is None:
return
for k,v in annotationdetails.items():
if type(v)==list:
for cv in v:
info.append('%s:%s' % (k,cv))
else:
info.append('%s:%s' % (k,v))
# get the annotation sequences:
if 'annotationid' in annotationdetails:
seqs=hs.supercooldb.getannotationseqs(hs.scdb,annotationdetails['annotationid'])
info.append('sequences: %d' % len(seqs))
# get the experiment details:
if 'expid' in annotationdetails:
expinfo=hs.supercooldb.getexperimentinfo(hs.scdb,annotationdetails['expid'])
for cinfo in expinfo:
info.append('experiment %s:%s' % (cinfo[0],cinfo[1]))
slistwin = SListWindow(info,'Annotation details')
slistwin.exec_()
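# Hedged illustration (not part of the original module): the annotationdetails
# dict consumed by showannotationdata() carries the annotation fields plus the
# ids it looks up explicitly; keys other than 'annotationid' and 'expid' below
# are illustrative only.
# showannotationdata({'annotationid': 12, 'expid': 3,
#                     'description': 'higher in feces',
#                     'details': ['low', 'high']})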
| amnona/heatsequer | heatsequer/plots/plotwingui.py | Python | bsd-3-clause | 28,472 |
import sys
import time
import traceback
import dis
from browser import document as doc, window, alert
from javascript import JSObject
# set height of container to 66% of screen
_height = doc.documentElement.clientHeight
_s = doc['container']
_s.style.height = '%spx' % int(_height * 0.66)
has_ace = True
try:
editor = window.ace.edit("editor")
session = editor.getSession()
session.setMode("ace/mode/python")
editor.setOptions({
'enableLiveAutocompletion': True,
'enableSnippets': True,
'highlightActiveLine': False,
'highlightSelectedWord': True
})
except:
from browser import html
editor = html.TEXTAREA(rows=20, cols=70)
doc["editor"] <= editor
def get_value(): return editor.value
def set_value(x):editor.value = x
editor.getValue = get_value
editor.setValue = set_value
has_ace = False
if sys.has_local_storage:
from browser.local_storage import storage
else:
storage = None
if 'set_debug' in doc:
__BRYTHON__.debug = int(doc['set_debug'].checked)
def reset_src():
if storage is not None and "py_src" in storage:
editor.setValue(storage["py_src"])
else:
editor.setValue('for i in range(10):\n\tprint(i)')
editor.scrollToRow(0)
editor.gotoLine(0)
def reset_src_area():
if storage and "py_src" in storage:
editor.value = storage["py_src"]
else:
editor.value = 'for i in range(10):\n\tprint(i)'
class cOutput:
def write(self, data):
doc["console"].value += str(data)
def flush(self):
pass
sys.stdout = cOutput()
sys.stderr = cOutput()
def to_str(xx):
return str(xx)
info = sys.implementation.version
doc['version'].text = '%s.%s.%s' % (info.major, info.minor, info.micro)
output = ''
def show_console(ev):
doc["console"].value = output
doc["console"].cols = 60
# load a Python script
def load_script(evt):
_name = evt.target.value + '?foo=%s' % time.time()
editor.setValue(open(_name).read())
# run a script, in global namespace if in_globals is True
def run(in_globals=False):
global output
doc["console"].value = ''
src = editor.getValue()
if storage is not None:
storage["py_src"] = src
t0 = time.perf_counter()
try:
        if in_globals:
exec(src)
else:
ns = {}
exec(src, ns)
state = 1
except Exception as exc:
traceback.print_exc(file=sys.stderr)
state = 0
output = doc["console"].value
print('<completed in %6.2f ms>' % ((time.perf_counter() - t0) * 1000.0))
return state
def show_js(ev):
src = editor.getValue()
doc["console"].value = dis.dis(src)
if has_ace:
reset_src()
else:
reset_src_area()
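# Hedged sketch (not part of the original file): these handlers are normally
# wired to page controls in the surrounding HTML; with Brython's event API the
# wiring could look like this, assuming buttons with these ids exist.
# doc['run'].bind('click', lambda ev: run())
# doc['show_js'].bind('click', show_js)
# doc['console_btn'].bind('click', show_console)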
| firmlyjin/brython | www/tests/editor.py | Python | bsd-3-clause | 2,743 |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class StaticgenAppConfig(AppConfig):
name = 'staticgen'
verbose_name = _('StaticGen')
| mishbahr/django-staticgen | staticgen/apps.py | Python | bsd-3-clause | 186 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# polyencoder
# Copyright 2015 Neil Freeman [email protected]
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import sys
import csv
from urllib import quote_plus
import fiona
from .polyencoder import polyencode
def getproperties(feature, keys):
'''Return a list of properties from feature'''
return [feature['properties'].get(k) for k in keys]
def encodelayer(infile, keys, encode=None, delimiter=None):
keys = keys.split(',')
writer = csv.writer(sys.stdout, delimiter=delimiter or '\t')
with fiona.drivers():
with fiona.open(infile, 'r') as layer:
for feature in layer:
if feature['geometry']['type'] == 'MultiPolygon':
# list of list of lists of tuples
coords = feature['geometry']['coordinates'][0][0]
elif feature['geometry']['type'] == 'Polygon' or feature['geometry']['type'] == 'MultiLineString':
# list of lists of tuples
coords = feature['geometry']['coordinates'][0]
                elif feature['geometry']['type'] == 'LineString':
# list of tuples
coords = feature['geometry']['coordinates']
else:
raise NotImplementedError(
"Polyencode not available for geometry type: {}".format(feature['geometry']['type']))
try:
encoded = polyencode(coords)
except TypeError:
print("Unexpected issue with {}".format(feature['properties'].get(keys[0])), file=sys.stderr)
raise
if encode:
encoded = quote_plus(encoded)
props = getproperties(feature, keys) + [encoded]
writer.writerow(props)
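# Hedged usage sketch (not part of the original module): writes one row per
# feature containing the requested properties plus the encoded geometry to
# stdout. The shapefile name and key list are illustrative only.
# encodelayer('counties.shp', 'GEOID,NAME', encode=True, delimiter=',')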
| fitnr/polyencoder | polyencoder/polyencode_layer.py | Python | bsd-3-clause | 3,370 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'ostap'
copyright = '2019, Ostap developers'
author = 'Ostap developers'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'breathe' ,
'sphinx.ext.autodoc' ,
'sphinx.ext.mathjax' ,
]
breathe_default_project = 'ostap'
breathe_domain_by_extension = {
"h" : "cpp" ,
"C" : "cpp" ,
"py" : "python" ,
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
## html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| OstapHEP/ostap | docs/conf.py | Python | bsd-3-clause | 2,147 |
import base64
import os
import tempfile
import zipfile
from cumulusci.core.utils import process_bool_arg
from cumulusci.salesforce_api.metadata import ApiDeploy
from cumulusci.tasks.salesforce import BaseSalesforceMetadataApiTask
from cumulusci.utils import zip_clean_metaxml
from cumulusci.utils import zip_inject_namespace
from cumulusci.utils import zip_strip_namespace
from cumulusci.utils import zip_tokenize_namespace
class Deploy(BaseSalesforceMetadataApiTask):
api_class = ApiDeploy
task_options = {
'path': {
'description': 'The path to the metadata source to be deployed',
'required': True,
},
'unmanaged': {
'description': "If True, changes namespace_inject to replace tokens with a blank string",
},
'namespace_inject': {
'description': "If set, the namespace tokens in files and filenames are replaced with the namespace's prefix",
},
'namespace_strip': {
'description': "If set, all namespace prefixes for the namespace specified are stripped from files and filenames",
},
'namespace_tokenize': {
'description': "If set, all namespace prefixes for the namespace specified are replaced with tokens for use with namespace_inject",
},
'namespaced_org': {
'description': "If True, the tokens %%%NAMESPACED_ORG%%% and ___NAMESPACED_ORG___ will get replaced with the namespace. The default is false causing those tokens to get stripped and replaced with an empty string. Set this if deploying to a namespaced scratch org or packaging org.",
},
'clean_meta_xml': {
'description': "Defaults to True which strips the <packageVersions/> element from all meta.xml files. The packageVersion element gets added automatically by the target org and is set to whatever version is installed in the org. To disable this, set this option to False",
},
}
def _get_api(self, path=None):
if not path:
path = self.task_config.options__path
# Build the zip file
zip_file = tempfile.TemporaryFile()
zipf = zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED)
pwd = os.getcwd()
os.chdir(path)
for root, dirs, files in os.walk('.'):
for f in files:
self._write_zip_file(zipf, root, f)
zipf.close()
zipf_processed = self._process_zip_file(zipfile.ZipFile(zip_file))
zipf_processed.fp.seek(0)
package_zip = base64.b64encode(zipf_processed.fp.read())
os.chdir(pwd)
return self.api_class(self, package_zip, purge_on_delete=False)
def _process_zip_file(self, zipf):
zipf = self._process_namespace(zipf)
zipf = self._process_meta_xml(zipf)
return zipf
def _process_namespace(self, zipf):
if self.options.get('namespace_tokenize'):
self.logger.info(
'Tokenizing namespace prefix {}__'.format(
self.options['namespace_tokenize'],
)
)
zipf = zip_tokenize_namespace(zipf, self.options['namespace_tokenize'], logger=self.logger)
if self.options.get('namespace_inject'):
kwargs = {}
kwargs['managed'] = not process_bool_arg(self.options.get('unmanaged', True))
kwargs['namespaced_org'] = process_bool_arg(self.options.get('namespaced_org', False))
kwargs['logger'] = self.logger
if kwargs['managed']:
self.logger.info(
'Replacing namespace tokens from metadata with namespace prefix {}__'.format(
self.options['namespace_inject'],
)
)
else:
self.logger.info(
'Stripping namespace tokens from metadata for unmanaged deployment'
)
zipf = zip_inject_namespace(zipf, self.options['namespace_inject'], **kwargs)
if self.options.get('namespace_strip'):
zipf = zip_strip_namespace(zipf, self.options['namespace_strip'], logger=self.logger)
return zipf
def _process_meta_xml(self, zipf):
if not process_bool_arg(self.options.get('clean_meta_xml', True)):
return zipf
self.logger.info(
'Cleaning meta.xml files of packageVersion elements for deploy'
)
zipf = zip_clean_metaxml(zipf, logger=self.logger)
return zipf
def _write_zip_file(self, zipf, root, path):
zipf.write(os.path.join(root, path))
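# Hedged sketch (not part of the original module): given the task_options read
# above, an unmanaged deploy with namespace tokens stripped could be configured
# roughly like this. TaskConfig and the surrounding project/org configs are
# supplied by the CumulusCI runtime and are assumptions here.
# task_config = TaskConfig({'options': {
#     'path': 'src',
#     'unmanaged': True,
#     'namespace_inject': 'npsp',
#     'clean_meta_xml': True,
# }})
# Deploy(project_config, task_config, org_config)()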
| e02d96ec16/CumulusCI | cumulusci/tasks/salesforce/Deploy.py | Python | bsd-3-clause | 4,641 |
from .store import (
Command, Store, RemoteStore, PubSub, PubSubClient,
parse_store_url, create_store, register_store, data_stores,
NoSuchStore
)
from .channels import Channels
from . import redis # noqa
from .pulsards.startds import start_store
__all__ = [
'Command',
'Store',
'RemoteStore',
'PubSub',
'PubSubClient',
'parse_store_url',
'create_store',
'register_store',
'data_stores',
'NoSuchStore',
'start_store',
'Channels'
]
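# Hedged usage sketch (not part of the original package __init__): the exported
# create_store() is typically called with a connection URL; the redis address
# below is illustrative only and a real call normally runs inside a pulsar
# event loop.
# store = create_store('redis://127.0.0.1:6379/7')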
| quantmind/pulsar | pulsar/apps/data/__init__.py | Python | bsd-3-clause | 496 |
# -*- coding:utf-8 -*-
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from unittest import mock, skipIf
import django
import pytest
from django.core.management import CommandError, call_command
from django.db.utils import ConnectionHandler
from django.test import SimpleTestCase
from django.utils.six.moves import StringIO
# Can't use @override_settings to swap out DATABASES, instead just mock.patch
# a new ConnectionHandler into the command module
command_connections = 'django_mysql.management.commands.dbparams.connections'
sqlite = ConnectionHandler({
'default': {'ENGINE': 'django.db.backends.sqlite3'}
})
full_db = ConnectionHandler({'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mydatabase',
'USER': 'ausername',
'PASSWORD': 'apassword',
'HOST': 'ahost.example.com',
'PORT': '12345',
'OPTIONS': {
'read_default_file': '/tmp/defaults.cnf',
'ssl': {'ca': '/tmp/mysql.cert'}
}
}})
socket_db = ConnectionHandler({'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '/etc/mydb.sock',
}})
class DBParamsTests(SimpleTestCase):
@skipIf(django.VERSION[:2] >= (1, 10),
'argument parsing uses a fixed single argument in Django 1.10+')
def test_invalid_number_of_databases(self):
with pytest.raises(CommandError) as excinfo:
call_command('dbparams', 'default', 'default', skip_checks=True)
assert "more than one connection" in str(excinfo.value)
def test_invalid_database(self):
with pytest.raises(CommandError) as excinfo:
call_command('dbparams', 'nonexistent', skip_checks=True)
assert "does not exist" in str(excinfo.value)
def test_invalid_both(self):
with pytest.raises(CommandError):
call_command('dbparams', dsn=True, mysql=True, skip_checks=True)
@mock.patch(command_connections, sqlite)
def test_invalid_not_mysql(self):
with pytest.raises(CommandError) as excinfo:
call_command('dbparams', skip_checks=True)
assert "not a MySQL database connection" in str(excinfo.value)
@mock.patch(command_connections, full_db)
def test_mysql_full(self):
out = StringIO()
call_command('dbparams', stdout=out, skip_checks=True)
output = out.getvalue()
assert (
output ==
"--defaults-file=/tmp/defaults.cnf --user=ausername "
"--password=apassword --host=ahost.example.com --port=12345 "
"--ssl-ca=/tmp/mysql.cert mydatabase"
)
@mock.patch(command_connections, socket_db)
def test_mysql_socket(self):
out = StringIO()
call_command('dbparams', stdout=out, skip_checks=True)
output = out.getvalue()
assert output == "--socket=/etc/mydb.sock"
@mock.patch(command_connections, full_db)
def test_dsn_full(self):
out = StringIO()
err = StringIO()
call_command('dbparams', 'default', dsn=True,
stdout=out, stderr=err, skip_checks=True)
output = out.getvalue()
assert (
output ==
"F=/tmp/defaults.cnf,u=ausername,p=apassword,h=ahost.example.com,"
"P=12345,D=mydatabase"
)
errors = err.getvalue()
assert "SSL params can't be" in errors
@mock.patch(command_connections, socket_db)
def test_dsn_socket(self):
out = StringIO()
err = StringIO()
call_command('dbparams', dsn=True,
stdout=out, stderr=err, skip_checks=True)
output = out.getvalue()
assert output == 'S=/etc/mydb.sock'
errors = err.getvalue()
assert errors == ""
| nickmeharry/django-mysql | tests/testapp/management/commands/test_dbparams.py | Python | bsd-3-clause | 3,738 |
# -*- coding: utf-8 -*-
"""
Django settings for djangocali-portal project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('djangocali-portal')
env = environ.Env()
environ.Env.read_env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
'djangocali-portal.users', # custom users app
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'djangocali-portal.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""djangocali""", '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///djangocali-portal"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
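# Example only (not project configuration): django-environ parses a value such
# as DATABASE_URL=postgres://user:password@host:5432/dbname into the dict above.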
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## CELERY
INSTALLED_APPS += ('djangocali-portal.taskapp.celery.CeleryConfig',)
# if you are not using the django database broker (e.g. rabbitmq, redis, memcached), you can remove the next line.
INSTALLED_APPS += ('kombu.transport.django',)
BROKER_URL = env("CELERY_BROKER_URL", default='django://')
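# Example only (not project configuration): the 'django://' default uses the
# kombu Django transport enabled above; an external broker would look like
# CELERY_BROKER_URL=redis://localhost:6379/0 or amqp://guest@localhost//.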
########## END CELERY
# Your common stuff: Below this line define 3rd party library settings
| Swappsco/portal | config/settings/common.py | Python | bsd-3-clause | 9,483 |
# -*- coding: utf-8 -*-
#
# Smisk documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 26 20:24:33 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# Load release info
exec(open(os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', 'lib', 'tc', 'release.py'))).read())
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'tc'
copyright = 'Rasmus Andersson'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
#todo_include_todos = True
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'screen.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'tcdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'tc.tex', 'tc documentation',
'Rasmus Andersson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| rsms/tc | docs/source/conf.py | Python | bsd-3-clause | 5,778 |
# -*- coding: utf-8 -*-
# /etl/database.py
"""Database module, including the SQLAlchemy database object and DB-related utilities."""
from sqlalchemy.orm import relationship
from .compat import basestring
from .extensions import db
# Alias common SQLAlchemy names
Column = db.Column
relationship = relationship
class CRUDMixin(object):
"""Mixin that adds convenience methods for CRUD (create, read, update, delete) operations."""
@classmethod
def create(cls, **kwargs):
"""Create a new record and save it the database."""
instance = cls(**kwargs)
return instance.save()
def update(self, commit=True, **kwargs):
"""Update specific fields of a record."""
for attr, value in kwargs.items():
setattr(self, attr, value)
return commit and self.save() or self
def save(self, commit=True):
"""Save the record."""
db.session.add(self)
if commit:
db.session.commit()
return self
def delete(self, commit=True):
"""Remove the record from the database."""
db.session.delete(self)
return commit and db.session.commit()
class Model(CRUDMixin, db.Model):
"""Base model class that includes CRUD convenience methods."""
__abstract__ = True
# From Mike Bayer's "Building the app" talk
# https://speakerdeck.com/zzzeek/building-the-app
class SurrogatePK(object):
"""A mixin that adds a surrogate integer 'primary key' column named ``id`` to any declarative-mapped class."""
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
@classmethod
def get_by_id(cls, record_id):
"""Get record by ID."""
if any(
(isinstance(record_id, basestring) and record_id.isdigit(),
isinstance(record_id, (int, float))),
):
return cls.query.get(int(record_id))
return None
def reference_col(tablename, nullable=False, pk_name='id', **kwargs):
"""Column that adds primary key foreign key reference.
Usage: ::
category_id = reference_col('category')
category = relationship('Category', backref='categories')
"""
return db.Column(
db.ForeignKey('{0}.{1}'.format(tablename, pk_name)),
nullable=nullable, **kwargs)
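# Hedged sketch (not part of the original module): a minimal pair of models
# using the helpers above. Table and column names are illustrative only.
# class Category(SurrogatePK, Model):
#     __tablename__ = 'categories'
#     name = Column(db.String(80), nullable=False)
# class Item(SurrogatePK, Model):
#     __tablename__ = 'items'
#     category_id = reference_col('categories')
#     category = relationship('Category', backref='items')
# Item.create(category_id=1)  # CRUDMixin adds create/update/save/delete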
| tomtom92/etl | etl/database.py | Python | bsd-3-clause | 2,331 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import re
from ... import gloo
class Compiler(object):
"""
Compiler is used to convert Function and Variable instances into
ready-to-use GLSL code. This class handles name mangling to ensure that
there are no name collisions amongst global objects. The final name of
each object may be retrieved using ``Compiler.__getitem__(obj)``.
Accepts multiple root Functions as keyword arguments. ``compile()`` then
returns a dict of GLSL strings with the same keys.
Example::
# initialize with two main functions
compiler = Compiler(vert=v_func, frag=f_func)
# compile and extract shaders
code = compiler.compile()
v_code = code['vert']
f_code = code['frag']
# look up name of some object
name = compiler[obj]
"""
def __init__(self, **shaders):
# cache of compilation results for each function and variable
self._object_names = {} # {object: name}
self.shaders = shaders
def __getitem__(self, item):
"""
Return the name of the specified object, if it has been assigned one.
"""
return self._object_names[item]
def compile(self, pretty=True):
""" Compile all code and return a dict {name: code} where the keys
are determined by the keyword arguments passed to __init__().
Parameters
----------
pretty : bool
If True, use a slower method to mangle object names. This produces
GLSL that is more readable.
If False, then the output is mostly unreadable GLSL, but is about
10x faster to compile.
"""
# Authoritative mapping of {obj: name}
self._object_names = {}
#
# 1. collect list of dependencies for each shader
#
# maps {shader_name: [deps]}
self._shader_deps = {}
for shader_name, shader in self.shaders.items():
this_shader_deps = []
self._shader_deps[shader_name] = this_shader_deps
dep_set = set()
for dep in shader.dependencies(sort=True):
# visit each object no more than once per shader
if dep.name is None or dep in dep_set:
continue
this_shader_deps.append(dep)
dep_set.add(dep)
#
# 2. Assign names to all objects.
#
if pretty:
self._rename_objects_pretty()
else:
self._rename_objects_fast()
#
# 3. Now we have a complete namespace; concatenate all definitions
# together in topological order.
#
compiled = {}
obj_names = self._object_names
for shader_name, shader in self.shaders.items():
code = []
for dep in self._shader_deps[shader_name]:
dep_code = dep.definition(obj_names)
if dep_code is not None:
# strip out version pragma if present;
regex = r'#version (\d+)'
m = re.search(regex, dep_code)
if m is not None:
# check requested version
if m.group(1) != '120':
raise RuntimeError("Currently only GLSL #version "
"120 is supported.")
dep_code = re.sub(regex, '', dep_code)
code.append(dep_code)
compiled[shader_name] = '\n'.join(code)
self.code = compiled
return compiled
def _rename_objects_fast(self):
""" Rename all objects quickly to guaranteed-unique names using the
id() of each object.
This produces mostly unreadable GLSL, but is about 10x faster to
compile.
"""
for shader_name, deps in self._shader_deps.items():
for dep in deps:
name = dep.name
if name != 'main':
ext = '_%x' % id(dep)
name = name[:32-len(ext)] + ext
self._object_names[dep] = name
def _rename_objects_pretty(self):
""" Rename all objects like "name_1" to avoid conflicts. Objects are
only renamed if necessary.
This method produces more readable GLSL, but is rather slow.
"""
#
# 1. For each object, add its static names to the global namespace
# and make a list of the shaders used by the object.
#
# {name: obj} mapping for finding unique names
# initialize with reserved keywords.
self._global_ns = dict([(kwd, None) for kwd in gloo.util.KEYWORDS])
# functions are local per-shader
self._shader_ns = dict([(shader, {}) for shader in self.shaders])
# for each object, keep a list of shaders the object appears in
obj_shaders = {}
for shader_name, deps in self._shader_deps.items():
for dep in deps:
# Add static names to namespace
for name in dep.static_names():
self._global_ns[name] = None
obj_shaders.setdefault(dep, []).append(shader_name)
#
# 2. Assign new object names
#
name_index = {}
for obj, shaders in obj_shaders.items():
name = obj.name
if self._name_available(obj, name, shaders):
# hooray, we get to keep this name
self._assign_name(obj, name, shaders)
else:
# boo, find a new name
while True:
index = name_index.get(name, 0) + 1
name_index[name] = index
ext = '_%d' % index
new_name = name[:32-len(ext)] + ext
if self._name_available(obj, new_name, shaders):
self._assign_name(obj, new_name, shaders)
break
def _is_global(self, obj):
""" Return True if *obj* should be declared in the global namespace.
Some objects need to be declared only in per-shader namespaces:
functions, static variables, and const variables may all be given
different definitions in each shader.
"""
# todo: right now we assume all Variables are global, and all
# Functions are local. Is this actually correct? Are there any
# global functions? Are there any local variables?
from .variable import Variable
return isinstance(obj, Variable)
def _name_available(self, obj, name, shaders):
""" Return True if *name* is available for *obj* in *shaders*.
"""
if name in self._global_ns:
return False
shaders = self.shaders if self._is_global(obj) else shaders
for shader in shaders:
if name in self._shader_ns[shader]:
return False
return True
def _assign_name(self, obj, name, shaders):
""" Assign *name* to *obj* in *shaders*.
"""
if self._is_global(obj):
assert name not in self._global_ns
self._global_ns[name] = obj
else:
for shader in shaders:
ns = self._shader_ns[shader]
assert name not in ns
ns[name] = obj
self._object_names[obj] = name
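# Hedged illustration (not part of the original module) of the fast-mangling
# scheme used by _rename_objects_fast above: the object's id() is appended as
# a hex suffix and the combined name is capped at 32 characters.
def _demo_fast_name(name, obj):
    ext = '_%x' % id(obj)
    return name[:32 - len(ext)] + ext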
| hronoses/vispy | vispy/visuals/shaders/compiler.py | Python | bsd-3-clause | 7,604 |
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds browsers that can be controlled by telemetry."""
import logging
from telemetry import decorators
from telemetry.internal.backends.chrome import android_browser_finder
from telemetry.internal.backends.chrome import cros_browser_finder
from telemetry.internal.backends.chrome import desktop_browser_finder
from telemetry.internal.browser import browser_finder_exceptions
from telemetry.internal.platform import device_finder
BROWSER_FINDERS = [
desktop_browser_finder,
android_browser_finder,
cros_browser_finder,
]
def FindAllBrowserTypes(options):
browsers = []
for bf in BROWSER_FINDERS:
browsers.extend(bf.FindAllBrowserTypes(options))
return browsers
@decorators.Cache
def FindBrowser(options):
"""Finds the best PossibleBrowser object given a BrowserOptions object.
Args:
A BrowserOptions object.
Returns:
A PossibleBrowser object.
Raises:
BrowserFinderException: Options improperly set, or an error occurred.
"""
if options.__class__.__name__ == '_FakeBrowserFinderOptions':
return options.fake_possible_browser
if options.browser_type == 'exact' and options.browser_executable == None:
raise browser_finder_exceptions.BrowserFinderException(
'--browser=exact requires --browser-executable to be set.')
if options.browser_type != 'exact' and options.browser_executable != None:
raise browser_finder_exceptions.BrowserFinderException(
'--browser-executable requires --browser=exact.')
if options.browser_type == 'cros-chrome' and options.cros_remote == None:
raise browser_finder_exceptions.BrowserFinderException(
'browser_type=cros-chrome requires cros_remote be set.')
if (options.browser_type != 'cros-chrome' and
options.browser_type != 'cros-chrome-guest' and
options.cros_remote != None):
raise browser_finder_exceptions.BrowserFinderException(
'--remote requires --browser=cros-chrome or cros-chrome-guest.')
devices = device_finder.GetDevicesMatchingOptions(options)
browsers = []
default_browsers = []
for device in devices:
for finder in BROWSER_FINDERS:
if(options.browser_type and options.browser_type != 'any' and
options.browser_type not in finder.FindAllBrowserTypes(options)):
continue
curr_browsers = finder.FindAllAvailableBrowsers(options, device)
new_default_browser = finder.SelectDefaultBrowser(curr_browsers)
if new_default_browser:
default_browsers.append(new_default_browser)
browsers.extend(curr_browsers)
if options.browser_type == None:
if default_browsers:
default_browser = sorted(default_browsers,
key=lambda b: b.last_modification_time())[-1]
logging.warning('--browser omitted. Using most recent local build: %s',
default_browser.browser_type)
default_browser.UpdateExecutableIfNeeded()
return default_browser
if len(browsers) == 1:
logging.warning('--browser omitted. Using only available browser: %s',
browsers[0].browser_type)
browsers[0].UpdateExecutableIfNeeded()
return browsers[0]
raise browser_finder_exceptions.BrowserTypeRequiredException(
'--browser must be specified. Available browsers:\n%s' %
'\n'.join(sorted(set([b.browser_type for b in browsers]))))
if options.browser_type == 'any':
types = FindAllBrowserTypes(options)
def CompareBrowsersOnTypePriority(x, y):
x_idx = types.index(x.browser_type)
y_idx = types.index(y.browser_type)
return x_idx - y_idx
browsers.sort(CompareBrowsersOnTypePriority)
if len(browsers) >= 1:
browsers[0].UpdateExecutableIfNeeded()
return browsers[0]
else:
return None
matching_browsers = [
b for b in browsers
if b.browser_type == options.browser_type and
b.SupportsOptions(options.browser_options)]
chosen_browser = None
if len(matching_browsers) == 1:
chosen_browser = matching_browsers[0]
elif len(matching_browsers) > 1:
logging.warning('Multiple browsers of the same type found: %s',
repr(matching_browsers))
chosen_browser = sorted(matching_browsers,
key=lambda b: b.last_modification_time())[-1]
if chosen_browser:
logging.info('Chose browser: %s', repr(chosen_browser))
chosen_browser.UpdateExecutableIfNeeded()
return chosen_browser
@decorators.Cache
def GetAllAvailableBrowsers(options, device):
"""Returns a list of available browsers on the device.
Args:
options: A BrowserOptions object.
device: The target device, which can be None.
Returns:
A list of browser instances.
Raises:
BrowserFinderException: Options are improperly set, or an error occurred.
"""
if not device:
return []
possible_browsers = []
for browser_finder in BROWSER_FINDERS:
possible_browsers.extend(
browser_finder.FindAllAvailableBrowsers(options, device))
return possible_browsers
@decorators.Cache
def GetAllAvailableBrowserTypes(options):
"""Returns a list of available browser types.
Args:
options: A BrowserOptions object.
Returns:
A list of browser type strings.
Raises:
BrowserFinderException: Options are improperly set, or an error occurred.
"""
devices = device_finder.GetDevicesMatchingOptions(options)
possible_browsers = []
for device in devices:
possible_browsers.extend(GetAllAvailableBrowsers(options, device))
type_list = set([browser.browser_type for browser in possible_browsers])
# The reference build should be available for mac, linux and win, but the
# desktop browser finder won't return it in the list of browsers.
for browser in possible_browsers:
if (browser.target_os == 'darwin' or browser.target_os.startswith('linux')
or browser.target_os.startswith('win')):
type_list.add('reference')
break
type_list = list(type_list)
type_list.sort()
return type_list
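# Hedged usage sketch (not part of the original module): FindBrowser() takes a
# BrowserFinderOptions-style object; the attribute values below are
# illustrative only.
# options = browser_options.BrowserFinderOptions()
# options.browser_type = 'any'
# possible_browser = FindBrowser(options)
# if possible_browser is None:
#     raise browser_finder_exceptions.BrowserFinderException('no browser found')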
| catapult-project/catapult-csm | telemetry/telemetry/internal/browser/browser_finder.py | Python | bsd-3-clause | 6,167 |
import os
from django.core.management.base import BaseCommand
from django.conf import settings
from fabtastic import db
class Command(BaseCommand):
args = '[<output_file_path>]'
help = 'Dumps a SQL backup of your entire DB. Defaults to CWD.'
def get_dump_path(self, db_alias):
"""
Determines the path to write the SQL dump to. Depends on whether the
user specified a path or not.
"""
if len(self.args) > 0:
return self.args[0]
else:
dump_filename = db.util.get_db_dump_filename(db_alias=db_alias)
return os.path.join(os.getcwd(), dump_filename)
def handle(self, *args, **options):
"""
Handle raw input.
"""
self.args = args
self.options = options
db_alias = getattr(settings, 'FABTASTIC_DIRECT_TO_DB_ALIAS', 'default')
# Get DB settings from settings.py.
database = db.util.get_db_setting_dict(db_alias=db_alias)
# Figure out where to dump the file to.
dump_path = self.get_dump_path(db_alias)
# Run the db dump.
        db.dump_db_to_file(dump_path, database)
| duointeractive/django-fabtastic | fabtastic/management/commands/ft_dump_db.py | Python | bsd-3-clause | 1,185 |
# -*- coding: utf-8 -*-
import json
import datetime
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from djangocms_text_ckeditor.models import Text
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.sites import site
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission, AnonymousUser
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.http import (Http404, HttpResponseBadRequest,
QueryDict, HttpResponseNotFound)
from django.utils.encoding import force_text, smart_str
from django.utils import timezone
from django.utils.six.moves.urllib.parse import urlparse
from cms import api
from cms.api import create_page, create_title, add_plugin, publish_page
from cms.admin.change_list import CMSChangeList
from cms.admin.forms import PageForm, AdvancedSettingsForm
from cms.admin.pageadmin import PageAdmin
from cms.constants import TEMPLATE_INHERITANCE_MAGIC
from cms.models import StaticPlaceholder
from cms.models.pagemodel import Page
from cms.models.permissionmodels import GlobalPagePermission, PagePermission
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.models.titlemodels import Title
from cms.test_utils import testcases as base
from cms.test_utils.testcases import (
    CMSTestCase, URL_CMS_PAGE_DELETE, URL_CMS_PAGE, URL_CMS_TRANSLATION_DELETE,
URL_CMS_PAGE_CHANGE_LANGUAGE, URL_CMS_PAGE_CHANGE,
URL_CMS_PAGE_ADD, URL_CMS_PAGE_PUBLISHED
)
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils import get_cms_setting
from cms.utils.compat import DJANGO_1_10
from cms.utils.urlutils import admin_reverse
class AdminTestsBase(CMSTestCase):
@property
def admin_class(self):
return site._registry[Page]
def _get_guys(self, admin_only=False, use_global_permissions=True):
admin_user = self.get_superuser()
if admin_only:
return admin_user
staff_user = self._get_staff_user(use_global_permissions)
return admin_user, staff_user
def _get_staff_user(self, use_global_permissions=True):
USERNAME = 'test'
if get_user_model().USERNAME_FIELD == 'email':
normal_guy = get_user_model().objects.create_user(USERNAME, '[email protected]', '[email protected]')
else:
normal_guy = get_user_model().objects.create_user(USERNAME, '[email protected]', USERNAME)
normal_guy.is_staff = True
normal_guy.is_active = True
normal_guy.save()
normal_guy.user_permissions = Permission.objects.filter(
codename__in=['change_page', 'change_title', 'add_page', 'add_title', 'delete_page', 'delete_title']
)
if use_global_permissions:
gpp = GlobalPagePermission.objects.create(
user=normal_guy,
can_change=True,
can_delete=True,
can_change_advanced_settings=False,
can_publish=True,
can_change_permissions=False,
can_move_page=True,
)
gpp.sites = Site.objects.all()
return normal_guy
class AdminTestCase(AdminTestsBase):
def test_extension_not_in_admin(self):
admin_user, staff = self._get_guys()
with self.login_user_context(admin_user):
request = self.get_request(URL_CMS_PAGE_CHANGE % 1, 'en',)
response = site.index(request)
self.assertNotContains(response, '/mytitleextension/')
self.assertNotContains(response, '/mypageextension/')
def test_2apphooks_with_same_namespace(self):
PAGE1 = 'Test Page'
PAGE2 = 'Test page 2'
APPLICATION_URLS = 'project.sampleapp.urls'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(PAGE1, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page2 = create_page(PAGE2, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.application_urls = APPLICATION_URLS
page.application_namespace = "space1"
page.save()
page2.application_urls = APPLICATION_URLS
page2.save()
# The admin edits the page (change the page name for ex.)
page_data = {
'title': PAGE2,
'slug': page2.get_slug(),
'language': 'en',
'site': page.site.pk,
'template': page2.template,
'application_urls': 'SampleApp',
'application_namespace': 'space1',
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page.pk, page_data)
self.assertEqual(resp.status_code, 302)
self.assertEqual(Page.objects.filter(application_namespace="space1").count(), 1)
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page_data)
self.assertEqual(resp.status_code, 200)
page_data['application_namespace'] = 'space2'
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page_data)
self.assertEqual(resp.status_code, 302)
def test_delete(self):
admin_user = self.get_superuser()
create_page("home", "nav_playground.html", "en",
created_by=admin_user, published=True)
page = create_page("delete-page", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_page('child-page', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=page)
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
page.publish('en')
with self.login_user_context(admin_user):
data = {'post': 'yes'}
response = self.client.post(URL_CMS_PAGE_DELETE % page.pk, data)
self.assertRedirects(response, URL_CMS_PAGE)
def test_delete_diff_language(self):
admin_user = self.get_superuser()
create_page("home", "nav_playground.html", "en",
created_by=admin_user, published=True)
page = create_page("delete-page", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_page('child-page', "nav_playground.html", "de",
created_by=admin_user, published=True, parent=page)
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
page.publish('en')
with self.login_user_context(admin_user):
data = {'post': 'yes'}
response = self.client.post(URL_CMS_PAGE_DELETE % page.pk, data)
self.assertRedirects(response, URL_CMS_PAGE)
def test_search_fields(self):
superuser = self.get_superuser()
from django.contrib.admin import site
with self.login_user_context(superuser):
for model, admin_instance in site._registry.items():
if model._meta.app_label != 'cms':
continue
if not admin_instance.search_fields:
continue
url = admin_reverse('cms_%s_changelist' % model._meta.model_name)
response = self.client.get('%s?q=1' % url)
errmsg = response.content
self.assertEqual(response.status_code, 200, errmsg)
def test_pagetree_filtered(self):
superuser = self.get_superuser()
create_page("root-page", "nav_playground.html", "en",
created_by=superuser, published=True)
with self.login_user_context(superuser):
url = admin_reverse('cms_page_changelist')
response = self.client.get('%s?template__exact=nav_playground.html' % url)
errmsg = response.content
self.assertEqual(response.status_code, 200, errmsg)
def test_delete_translation(self):
admin_user = self.get_superuser()
page = create_page("delete-page-translation", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_title("de", "delete-page-translation-2", page, slug="delete-page-translation-2")
create_title("es-mx", "delete-page-translation-es", page, slug="delete-page-translation-es")
with self.login_user_context(admin_user):
response = self.client.get(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'})
self.assertEqual(response.status_code, 200)
response = self.client.post(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'})
self.assertRedirects(response, URL_CMS_PAGE)
response = self.client.get(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'es-mx'})
self.assertEqual(response.status_code, 200)
response = self.client.post(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'es-mx'})
self.assertRedirects(response, URL_CMS_PAGE)
def test_change_dates(self):
admin_user, staff = self._get_guys()
with self.settings(USE_TZ=False, TIME_ZONE='UTC'):
page = create_page('test-page', 'nav_playground.html', 'en')
page.publish('en')
draft = page.get_draft_object()
original_date = draft.publication_date
original_end_date = draft.publication_end_date
new_date = timezone.now() - datetime.timedelta(days=1)
new_end_date = timezone.now() + datetime.timedelta(days=1)
url = admin_reverse('cms_page_dates', args=(draft.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {
'language': 'en',
'site': draft.site.pk,
'publication_date_0': new_date.date(),
'publication_date_1': new_date.strftime("%H:%M:%S"),
'publication_end_date_0': new_end_date.date(),
'publication_end_date_1': new_end_date.strftime("%H:%M:%S"),
})
self.assertEqual(response.status_code, 302)
draft = Page.objects.get(pk=draft.pk)
self.assertNotEqual(draft.publication_date.timetuple(), original_date.timetuple())
self.assertEqual(draft.publication_date.timetuple(), new_date.timetuple())
self.assertEqual(draft.publication_end_date.timetuple(), new_end_date.timetuple())
if original_end_date:
self.assertNotEqual(draft.publication_end_date.timetuple(), original_end_date.timetuple())
with self.settings(USE_TZ=True, TIME_ZONE='UTC'):
page = create_page('test-page-2', 'nav_playground.html', 'en')
page.publish('en')
draft = page.get_draft_object()
original_date = draft.publication_date
original_end_date = draft.publication_end_date
new_date = timezone.localtime(timezone.now()) - datetime.timedelta(days=1)
new_end_date = timezone.localtime(timezone.now()) + datetime.timedelta(days=1)
url = admin_reverse('cms_page_dates', args=(draft.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {
'language': 'en',
'site': draft.site.pk,
'publication_date_0': new_date.date(),
'publication_date_1': new_date.strftime("%H:%M:%S"),
'publication_end_date_0': new_end_date.date(),
'publication_end_date_1': new_end_date.strftime("%H:%M:%S"),
})
self.assertEqual(response.status_code, 302)
draft = Page.objects.get(pk=draft.pk)
self.assertNotEqual(draft.publication_date.timetuple(), original_date.timetuple())
self.assertEqual(timezone.localtime(draft.publication_date).timetuple(), new_date.timetuple())
self.assertEqual(timezone.localtime(draft.publication_end_date).timetuple(), new_end_date.timetuple())
if original_end_date:
self.assertNotEqual(draft.publication_end_date.timetuple(), original_end_date.timetuple())
def test_change_template(self):
admin_user, staff = self._get_guys()
request = self.get_request(URL_CMS_PAGE_CHANGE % 1, 'en')
request.method = "POST"
pageadmin = site._registry[Page]
with self.login_user_context(staff):
self.assertRaises(Http404, pageadmin.change_template, request, 1)
page = create_page('test-page', 'nav_playground.html', 'en')
response = pageadmin.change_template(request, page.pk)
self.assertEqual(response.status_code, 403)
url = admin_reverse('cms_page_change_template', args=(page.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {'template': 'doesntexist'})
self.assertEqual(response.status_code, 400)
response = self.client.post(url, {'template': get_cms_setting('TEMPLATES')[0][0]})
self.assertEqual(response.status_code, 200)
def test_changelist_items(self):
admin_user = self.get_superuser()
first_level_page = create_page('level1', 'nav_playground.html', 'en')
second_level_page_top = create_page('level21', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=first_level_page)
second_level_page_bottom = create_page('level22', "nav_playground.html", "en",
created_by=admin_user, published=True,
parent=self.reload(first_level_page))
third_level_page = create_page('level3', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=second_level_page_top)
self.assertEqual(Page.objects.all().count(), 4)
url = admin_reverse('cms_%s_changelist' % Page._meta.model_name)
request = self.get_request(url)
request.session = {}
request.user = admin_user
page_admin = site._registry[Page]
cl_params = [request, page_admin.model, page_admin.list_display,
page_admin.list_display_links, page_admin.list_filter,
page_admin.date_hierarchy, page_admin.search_fields,
page_admin.list_select_related, page_admin.list_per_page]
if hasattr(page_admin, 'list_max_show_all'): # django 1.4
cl_params.append(page_admin.list_max_show_all)
cl_params.extend([page_admin.list_editable, page_admin])
cl = CMSChangeList(*tuple(cl_params))
root_page = cl.items[0]
self.assertEqual(root_page, first_level_page)
self.assertEqual(root_page.get_children()[0], second_level_page_top)
self.assertEqual(root_page.get_children()[1], second_level_page_bottom)
self.assertEqual(root_page.get_children()[0].get_children()[0], third_level_page)
def test_changelist_get_results(self):
admin_user = self.get_superuser()
first_level_page = create_page('level1', 'nav_playground.html', 'en', published=True)
second_level_page_top = create_page('level21', "nav_playground.html", "en",
created_by=admin_user, published=True,
parent=first_level_page)
second_level_page_bottom = create_page('level22', "nav_playground.html", "en", # nopyflakes
created_by=admin_user, published=True,
parent=self.reload(first_level_page))
third_level_page = create_page('level3', "nav_playground.html", "en", # nopyflakes
created_by=admin_user, published=True,
parent=second_level_page_top)
fourth_level_page = create_page('level23', "nav_playground.html", "en", # nopyflakes
created_by=admin_user,
parent=self.reload(first_level_page))
self.assertEqual(Page.objects.all().count(), 9)
url = admin_reverse('cms_%s_changelist' % Page._meta.model_name)
request = self.get_request(url)
request.session = {}
request.user = admin_user
page_admin = site._registry[Page]
# full blown page list. only draft pages are taken into account
cl_params = [request, page_admin.model, page_admin.list_display,
page_admin.list_display_links, page_admin.list_filter,
page_admin.date_hierarchy, page_admin.search_fields,
page_admin.list_select_related, page_admin.list_per_page]
if hasattr(page_admin, 'list_max_show_all'): # django 1.4
cl_params.append(page_admin.list_max_show_all)
cl_params.extend([page_admin.list_editable, page_admin])
cl = CMSChangeList(*tuple(cl_params))
cl.get_results(request)
self.assertEqual(cl.full_result_count, 5)
self.assertEqual(cl.result_count, 5)
# only one unpublished page is returned
request = self.get_request(url+'?q=level23')
request.session = {}
request.user = admin_user
cl_params[0] = request
cl = CMSChangeList(*tuple(cl_params))
cl.get_results(request)
self.assertEqual(cl.full_result_count, 5)
self.assertEqual(cl.result_count, 1)
# a number of pages match the query
request = self.get_request(url+'?q=level2')
request.session = {}
request.user = admin_user
cl_params[0] = request
cl = CMSChangeList(*tuple(cl_params))
cl.get_results(request)
self.assertEqual(cl.full_result_count, 5)
self.assertEqual(cl.result_count, 3)
def test_unihandecode_doesnt_break_404_in_admin(self):
self.get_superuser()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='admin', password='admin')
response = self.client.get(URL_CMS_PAGE_CHANGE_LANGUAGE % (1, 'en'))
# Since Django 1.11, a 404 results in a redirect to the admin home
if DJANGO_1_10:
self.assertEqual(response.status_code, 404)
else:
self.assertRedirects(response, reverse('admin:index'))
def test_empty_placeholder_with_nested_plugins(self):
# It's important that this test clears a placeholder
# which only has nested plugins.
# This allows us to catch a strange bug that happened
# under these conditions with the new related name handling.
page_en = create_page("EmptyPlaceholderTestPage (EN)", "nav_playground.html", "en")
ph = page_en.placeholders.get(slot="body")
column_wrapper = add_plugin(ph, "MultiColumnPlugin", "en")
add_plugin(ph, "ColumnPlugin", "en", parent=column_wrapper)
add_plugin(ph, "ColumnPlugin", "en", parent=column_wrapper)
# before clearing the placeholder
self.assertEqual(ph.get_plugins('en').count(), 3)
admin_user, staff = self._get_guys()
endpoint = self.get_clear_placeholder_url(ph, language='en')
with self.login_user_context(admin_user):
response = self.client.post(endpoint, {'test': 0})
self.assertEqual(response.status_code, 302)
# After clearing the placeholder it must no longer contain any plugins
self.assertEqual(ph.get_plugins('en').count(), 0)
def test_empty_placeholder_in_correct_language(self):
"""
Test that clearing a placeholder only affects the current language's contents
"""
# create some objects
page_en = create_page("EmptyPlaceholderTestPage (EN)", "nav_playground.html", "en")
ph = page_en.placeholders.get(slot="body")
# add the text plugin to the en version of the page
add_plugin(ph, "TextPlugin", "en", body="Hello World EN 1")
add_plugin(ph, "TextPlugin", "en", body="Hello World EN 2")
# creating a de title of the page and adding plugins to it
create_title("de", page_en.get_title(), page_en, slug=page_en.get_slug())
add_plugin(ph, "TextPlugin", "de", body="Hello World DE")
add_plugin(ph, "TextPlugin", "de", body="Hello World DE 2")
add_plugin(ph, "TextPlugin", "de", body="Hello World DE 3")
# before cleaning the de placeholder
self.assertEqual(ph.get_plugins('en').count(), 2)
self.assertEqual(ph.get_plugins('de').count(), 3)
admin_user, staff = self._get_guys()
endpoint = self.get_clear_placeholder_url(ph, language='de')
with self.login_user_context(admin_user):
response = self.client.post(endpoint, {'test': 0})
self.assertEqual(response.status_code, 302)
# After cleaning the de placeholder, en placeholder must still have all the plugins
self.assertEqual(ph.get_plugins('en').count(), 2)
self.assertEqual(ph.get_plugins('de').count(), 0)
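# Illustrative helper (an addition for clarity, not part of the original test
# module): the changelist tests above build a CMSChangeList by hand twice,
# including the Django-1.4-only ``list_max_show_all`` argument. A sketch of
# that shared construction, assuming the same positional signature used in
# ``test_changelist_items`` and ``test_changelist_get_results``:
def _build_cms_changelist(request, page_admin):
    params = [request, page_admin.model, page_admin.list_display,
              page_admin.list_display_links, page_admin.list_filter,
              page_admin.date_hierarchy, page_admin.search_fields,
              page_admin.list_select_related, page_admin.list_per_page]
    if hasattr(page_admin, 'list_max_show_all'):  # django 1.4
        params.append(page_admin.list_max_show_all)
    params.extend([page_admin.list_editable, page_admin])
    return CMSChangeList(*params)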
class AdminTests(AdminTestsBase):
# TODO: needs tests for actual permissions, not only superuser/normaluser
def setUp(self):
self.page = create_page("testpage", "nav_playground.html", "en")
def get_admin(self):
User = get_user_model()
fields = dict(email="[email protected]", is_staff=True, is_superuser=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "admin"
usr = User(**fields)
usr.set_password(getattr(usr, User.USERNAME_FIELD))
usr.save()
return usr
def get_permless(self):
User = get_user_model()
fields = dict(email="[email protected]", is_staff=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "permless"
usr = User(**fields)
usr.set_password(getattr(usr, User.USERNAME_FIELD))
usr.save()
return usr
def get_page(self):
return self.page
def test_change_publish_unpublish(self):
page = self.get_page()
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 405)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 403)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
page = self.reload(page)
self.assertTrue(page.is_published('en'))
response = self.admin_class.unpublish(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
def test_change_status_adds_log_entry(self):
page = self.get_page()
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
self.assertFalse(LogEntry.objects.count())
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
self.assertEqual(1, LogEntry.objects.count())
self.assertEqual(page.pk, int(LogEntry.objects.all()[0].object_id))
def test_change_innavigation(self):
page = self.get_page()
permless = self.get_permless()
admin_user = self.get_admin()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 405)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
self.assertRaises(Http404, self.admin_class.change_innavigation,
request, page.pk + 100)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
old = page.in_navigation
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 204)
page = self.reload(page)
self.assertEqual(old, not page.in_navigation)
def test_publish_page_requires_perms(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
request.method = "POST"
response = self.admin_class.publish_page(request, Page.objects.all()[0].pk, "en")
self.assertEqual(response.status_code, 403)
def test_remove_plugin_requires_post(self):
ph = Placeholder.objects.create(slot='test')
plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
admin_user = self.get_admin()
with self.login_user_context(admin_user):
endpoint = self.get_delete_plugin_uri(plugin)
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 200)
def test_move_language(self):
page = self.get_page()
source, target = list(page.placeholders.all())[:2]
col = add_plugin(source, 'MultiColumnPlugin', 'en')
sub_col = add_plugin(source, 'ColumnPlugin', 'en', target=col)
col2 = add_plugin(source, 'MultiColumnPlugin', 'de')
admin_user = self.get_admin()
with self.login_user_context(admin_user):
data = {
'plugin_id': sub_col.pk,
'placeholder_id': source.id,
'plugin_parent': col2.pk,
'plugin_language': 'de'
}
endpoint = self.get_move_plugin_uri(sub_col)
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
sub_col = CMSPlugin.objects.get(pk=sub_col.pk)
self.assertEqual(sub_col.language, "de")
self.assertEqual(sub_col.parent_id, col2.pk)
def test_preview_page(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
self.assertRaises(Http404, self.admin_class.preview_page, request, 404, "en")
page = self.get_page()
page.publish("en")
Page.set_homepage(page)
base_url = page.get_absolute_url()
with self.login_user_context(permless):
request = self.get_request('/?public=true')
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
request = self.get_request()
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
current_site = Site.objects.create(domain='django-cms.org', name='django-cms')
page.site = current_site
page.save()
page.publish("en")
self.assertTrue(page.is_home)
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'],
'http://django-cms.org%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
def test_too_many_plugins_global(self):
conf = {
'body': {
'limits': {
'global': 1,
},
},
}
admin_user = self.get_admin()
url = admin_reverse('cms_page_add_plugin')
with self.settings(CMS_PERMISSION=False, CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_language': 'en',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_too_many_plugins_type(self):
conf = {
'body': {
'limits': {
'TextPlugin': 1,
},
},
}
admin_user = self.get_admin()
url = admin_reverse('cms_page_add_plugin')
with self.settings(CMS_PERMISSION=False, CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_edit_title_dirty_bit(self):
language = "en"
admin_user = self.get_admin()
page = create_page('A', 'nav_playground.html', language)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
page.publish("en")
draft_page = page.get_draft_object()
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
draft_page.pk, language
))
post_data = {
'title': "A Title"
}
with self.login_user_context(admin_user):
self.client.post(admin_url, post_data)
draft_page = Page.objects.get(pk=page.pk).get_draft_object()
self.assertTrue(draft_page.is_dirty('en'))
def test_edit_title_languages(self):
language = "en"
admin_user = self.get_admin()
page = create_page('A', 'nav_playground.html', language)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
page.publish("en")
draft_page = page.get_draft_object()
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
draft_page.pk, language
))
post_data = {
'title': "A Title"
}
with self.login_user_context(admin_user):
self.client.post(admin_url, post_data)
draft_page = Page.objects.get(pk=page.pk).get_draft_object()
self.assertTrue(draft_page.is_dirty('en'))
def test_page_form_leak(self):
language = "en"
admin_user = self.get_admin()
request = self.get_request('/', 'en')
request.user = admin_user
page = create_page('A', 'nav_playground.html', language, menu_title='menu title')
page_admin = PageAdmin(Page, site)
page_admin._current_page = page
edit_form = page_admin.get_form(request, page)
add_form = page_admin.get_form(request, None)
self.assertEqual(edit_form.base_fields['menu_title'].initial, 'menu title')
self.assertEqual(add_form.base_fields['menu_title'].initial, None)
class NoDBAdminTests(CMSTestCase):
@property
def admin_class(self):
return site._registry[Page]
def test_lookup_allowed_site__exact(self):
self.assertTrue(self.admin_class.lookup_allowed('site__exact', '1'))
def test_lookup_allowed_published(self):
self.assertTrue(self.admin_class.lookup_allowed('published', value='1'))
class PluginPermissionTests(AdminTestsBase):
def setUp(self):
self._page = create_page('test page', 'nav_playground.html', 'en')
self._placeholder = self._page.placeholders.all()[0]
def _get_admin(self):
User = get_user_model()
fields = dict(email="[email protected]", is_staff=True, is_active=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "admin"
admin_user = User(**fields)
admin_user.set_password('admin')
admin_user.save()
return admin_user
def _get_page_admin(self):
return admin.site._registry[Page]
def _give_permission(self, user, model, permission_type, save=True):
codename = '%s_%s' % (permission_type, model._meta.object_name.lower())
user.user_permissions.add(Permission.objects.get(codename=codename))
def _give_page_permission_rights(self, user):
self._give_permission(user, PagePermission, 'add')
self._give_permission(user, PagePermission, 'change')
self._give_permission(user, PagePermission, 'delete')
def _get_change_page_request(self, user, page):
return type('Request', (object,), {
'user': user,
'path': base.URL_CMS_PAGE_CHANGE % page.pk
})
def _give_cms_permissions(self, user, save=True):
for perm_type in ['add', 'change', 'delete']:
for model in [Page, Title]:
self._give_permission(user, model, perm_type, False)
gpp = GlobalPagePermission.objects.create(
user=user,
can_change=True,
can_delete=True,
can_change_advanced_settings=False,
can_publish=True,
can_change_permissions=False,
can_move_page=True,
)
gpp.sites = Site.objects.all()
if save:
user.save()
def _create_plugin(self):
plugin = add_plugin(self._placeholder, 'TextPlugin', 'en')
return plugin
def test_plugin_edit_wrong_url(self):
"""User tries to edit a plugin using a random url. 404 response returned"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
self._give_permission(normal_guy, Text, 'change')
url = '%s/edit-plugin/%s/' % (admin_reverse('cms_page_edit_plugin', args=[plugin.id]), plugin.id)
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponseNotFound.status_code)
self.assertTrue("Plugin not found" in force_text(response.content))
class AdminFormsTests(AdminTestsBase):
def test_clean_overwrite_url(self):
user = AnonymousUser()
user.is_superuser = True
user.pk = 1
request = type('Request', (object,), {'user': user})
with self.settings():
data = {
'title': 'TestPage',
'slug': 'test-page',
'language': 'en',
'overwrite_url': '/overwrite/url/',
'site': Site.objects.get_current().pk,
'template': get_cms_setting('TEMPLATES')[0][0],
'published': True
}
form = PageForm(data)
self.assertTrue(form.is_valid(), form.errors.as_text())
instance = form.save()
instance.permission_user_cache = user
instance.permission_advanced_settings_cache = True
Title.objects.set_or_create(request, instance, form, 'en')
form = PageForm(data, instance=instance)
self.assertTrue(form.is_valid(), form.errors.as_text())
def test_missmatching_site_parent_dotsite(self):
site0 = Site.objects.create(domain='foo.com', name='foo.com')
site1 = Site.objects.create(domain='foo2.com', name='foo.com')
parent_page = Page.objects.create(
template='nav_playground.html',
site=site0)
new_page_data = {
'title': 'Title',
'slug': 'slug',
'language': 'en',
'site': site1.pk,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent': parent_page.pk,
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
self.assertIn(u"Site doesn't match the parent's page site",
form.errors['__all__'])
def test_form_errors(self):
new_page_data = {
'title': 'Title',
'slug': 'home',
'language': 'en',
'site': 10,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
site0 = Site.objects.create(domain='foo.com', name='foo.com', pk=2)
page1 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "fr", site=site0)
new_page_data = {
'title': 'Title',
'slug': 'home',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent': page1.pk,
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
new_page_data = {
'title': 'Title',
'slug': '#',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
new_page_data = {
'title': 'Title',
'slug': 'home',
'language': 'pp',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent':'',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
page2 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "en")
new_page_data = {
'title': 'Title',
'slug': 'test',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent':'',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
page3 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "en", parent=page2)
page3.title_set.update(path="hello/")
page3 = page3.reload()
new_page_data = {
'title': 'Title',
'slug': 'test',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent':'',
}
form = PageForm(data=new_page_data, files=None, instance=page3)
self.assertFalse(form.is_valid())
def test_reverse_id_error_location(self):
''' Test moving the reverse_id validation error to a field specific one '''
# this is the Reverse ID we'll re-use to break things.
dupe_id = 'p1'
curren_site = Site.objects.get_current()
create_page('Page 1', 'nav_playground.html', 'en', reverse_id=dupe_id)
page2 = create_page('Page 2', 'nav_playground.html', 'en')
# Assemble a bunch of data to test the page form
page2_data = {
'language': 'en',
'site': curren_site.pk,
'reverse_id': dupe_id,
'template': 'col_two.html',
}
form = AdvancedSettingsForm(
data=page2_data,
instance=page2,
files=None,
)
self.assertFalse(form.is_valid())
# reverse_id is the only item that is in __all__ as every other field
# has its own clean method. Moving it to a field-specific error means
# __all__ is no longer available.
self.assertNotIn('__all__', form.errors)
# In moving it to its own field, it should be in form.errors, and
# the values contained therein should match these.
self.assertIn('reverse_id', form.errors)
self.assertEqual(1, len(form.errors['reverse_id']))
self.assertEqual([u'A page with this reverse URL id exists already.'],
form.errors['reverse_id'])
page2_data['reverse_id'] = ""
form = AdvancedSettingsForm(
data=page2_data,
instance=page2,
files=None,
)
self.assertTrue(form.is_valid())
admin_user = self._get_guys(admin_only=True)
# reset some of page2_data so we can use cms.api.create_page
page2 = page2.reload()
page2.site = curren_site
page2.save()
with self.login_user_context(admin_user):
# re-reset the page2_data for the admin form instance.
page2_data['reverse_id'] = dupe_id
page2_data['site'] = curren_site.pk
# post to the admin change form for page 2, and test that the
# reverse_id form row has an errors class. Django's admin avoids
# collapsing these, so that the error is visible.
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page2_data)
self.assertContains(resp, '<div class="form-row errors field-reverse_id">')
def test_advanced_settings_endpoint(self):
admin_user = self.get_superuser()
site = Site.objects.get_current()
page = create_page('Page 1', 'nav_playground.html', 'en')
page_data = {
'language': 'en',
'site': site.pk,
'template': 'col_two.html',
}
path = admin_reverse('cms_page_advanced', args=(page.pk,))
with self.login_user_context(admin_user):
en_path = path + u"?language=en"
redirect_path = admin_reverse('cms_page_changelist') + '?language=en'
response = self.client.post(en_path, page_data)
self.assertRedirects(response, redirect_path)
self.assertEqual(Page.objects.get(pk=page.pk).template, 'col_two.html')
# Now switch it up by using German as the current language.
# Note that German has not been created as a page translation.
page_data['language'] = 'de'
page_data['template'] = 'nav_playground.html'
with self.login_user_context(admin_user):
de_path = path + u"?language=de"
redirect_path = admin_reverse('cms_page_change', args=(page.pk,)) + '?language=de'
response = self.client.post(de_path, page_data)
# Assert user is redirected to basic settings.
self.assertRedirects(response, redirect_path)
# Make sure no change was made
self.assertEqual(Page.objects.get(pk=page.pk).template, 'col_two.html')
de_translation = create_title('de', title='Page 1', page=page.reload())
de_translation.slug = ''
de_translation.save()
# Now try again but slug is set to empty string.
page_data['language'] = 'de'
page_data['template'] = 'nav_playground.html'
with self.login_user_context(admin_user):
de_path = path + u"?language=de"
response = self.client.post(de_path, page_data)
# Assert user is not redirected because there was a form error
self.assertEqual(response.status_code, 200)
# Make sure no change was made
self.assertEqual(Page.objects.get(pk=page.pk).template, 'col_two.html')
de_translation.slug = 'someslug'
de_translation.save()
# Now try again but with the title having a slug.
page_data['language'] = 'de'
page_data['template'] = 'nav_playground.html'
with self.login_user_context(admin_user):
en_path = path + u"?language=de"
redirect_path = admin_reverse('cms_page_changelist') + '?language=de'
response = self.client.post(en_path, page_data)
self.assertRedirects(response, redirect_path)
self.assertEqual(Page.objects.get(pk=page.pk).template, 'nav_playground.html')
def test_advanced_settings_endpoint_fails_gracefully(self):
admin_user = self.get_superuser()
site = Site.objects.get_current()
page = create_page('Page 1', 'nav_playground.html', 'en')
page_data = {
'language': 'en',
'site': site.pk,
'template': 'col_two.html',
}
path = admin_reverse('cms_page_advanced', args=(page.pk,))
# It's important to test fields that are validated
# automatically by Django vs fields that are validated
# via the clean() method by us.
# Fields validated by Django will not be in cleaned data
# if they have an error so if we rely on these in the clean()
# method then an error will be raised.
# So test that the form short-circuits if there are errors.
page_data['application_urls'] = 'TestApp'
page_data['site'] = '1000'
with self.login_user_context(admin_user):
de_path = path + u"?language=de"
response = self.client.post(de_path, page_data)
# Assert user is not redirected because there was a form error
self.assertEqual(response.status_code, 200)
page = page.reload()
# Make sure no change was made
self.assertEqual(page.application_urls, None)
self.assertEqual(page.site.pk, site.pk)
def test_create_page_type(self):
page = create_page('Test', 'static.html', 'en', published=True, reverse_id="home")
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
page.publish('en')
self.assertEqual(Page.objects.count(), 2)
self.assertEqual(CMSPlugin.objects.count(), 4)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get(
"%s?copy_target=%s&language=%s" % (admin_reverse("cms_page_add_page_type"), page.pk, 'en'))
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 3)
self.assertEqual(Page.objects.filter(reverse_id="page_types").count(), 1)
page_types = Page.objects.get(reverse_id='page_types')
url = response.url if hasattr(response, 'url') else response['Location']
expected_url_params = QueryDict(
'target=%s&position=first-child&add_page_type=1©_target=%s&language=en' % (page_types.pk, page.pk))
response_url_params = QueryDict(urlparse(url).query)
self.assertDictEqual(expected_url_params, response_url_params)
response = self.client.get("%s?copy_target=%s&language=%s" % (
admin_reverse("cms_page_add_page_type"), page.pk, 'en'), follow=True)
self.assertEqual(response.status_code, 200)
# no page type selector should be offered when no page types exist yet
response = self.client.get(admin_reverse('cms_page_add'))
self.assertNotContains(response, "page_type")
# create our first page type
page_data = {
'title': 'type1', 'slug': 'type1', '_save': 1, 'template': 'static.html', 'site': 1,
'language': 'en'
}
response = self.client.post(
"%s?target=%s&position=first-child&add_page_type=1©_target=%s&language=en" % (
URL_CMS_PAGE_ADD, page_types.pk, page.pk
), data=page_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 4)
self.assertEqual(CMSPlugin.objects.count(), 6)
response = self.client.get(admin_reverse('cms_page_add'))
self.assertContains(response, "page_type")
# no page types available if you use the copy_target
response = self.client.get("%s?copy_target=%s&language=en" % (admin_reverse('cms_page_add'), page.pk))
self.assertNotContains(response, "page_type")
def test_render_edit_mode(self):
from django.core.cache import cache
cache.clear()
homepage = create_page('Test', 'static.html', 'en', published=True)
Page.set_homepage(homepage)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
user = self.get_superuser()
self.assertEqual(Placeholder.objects.all().count(), 4)
with self.login_user_context(user):
output = force_text(
self.client.get(
'/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
).content
)
self.assertIn('<b>Test</b>', output)
self.assertEqual(Placeholder.objects.all().count(), 9)
self.assertEqual(StaticPlaceholder.objects.count(), 2)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
output = force_text(
self.client.get(
'/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
).content
)
self.assertIn('<b>Test</b>', output)
def test_tree_view_queries(self):
from django.core.cache import cache
cache.clear()
for i in range(10):
create_page('Test%s' % i, 'col_two.html', 'en', published=True)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
user = self.get_superuser()
with self.login_user_context(user):
with self.assertNumQueries(FuzzyInt(12, 22)):
force_text(self.client.get(URL_CMS_PAGE))
def test_smart_link_published_pages(self):
admin, staff_guy = self._get_guys()
page_url = URL_CMS_PAGE_PUBLISHED # Not sure how to achieve this with reverse...
create_page('home', 'col_two.html', 'en', published=True)
with self.login_user_context(staff_guy):
multi_title_page = create_page('main_title', 'col_two.html', 'en', published=True,
overwrite_url='overwritten_url',
menu_title='menu_title')
title = multi_title_page.get_title_obj()
title.page_title = 'page_title'
title.save()
multi_title_page.save()
publish_page(multi_title_page, admin, 'en')
# A non-AJAX call should return a 403, as this page should only be accessed via AJAX queries
self.assertEqual(403, self.client.get(page_url).status_code)
self.assertEqual(200,
self.client.get(page_url, HTTP_X_REQUESTED_WITH='XMLHttpRequest').status_code
)
# Test that the query param is working as expected.
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'main_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'menu_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'overwritten_url'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'page_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
class AdminPageEditContentSizeTests(AdminTestsBase):
"""
The number of users in the system influences the size of the page edit view,
but each username should appear only twice on the page.
The test relates to extra=0
on PagePermissionInlineAdminForm and ViewRestrictionInlineAdmin.
"""
def test_editpage_contentsize(self):
"""
Expect the username to appear only twice in the content, while the overall
page size grows with the number of users in the system.
"""
with self.settings(CMS_PERMISSION=True):
admin_user = self.get_superuser()
PAGE_NAME = 'TestPage'
USER_NAME = 'test_size_user_0'
current_site = Site.objects.get(pk=1)
page = create_page(PAGE_NAME, "nav_playground.html", "en", site=current_site, created_by=admin_user)
page.save()
self._page = page
with self.login_user_context(admin_user):
url = base.URL_CMS_PAGE_PERMISSION_CHANGE % self._page.pk
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
old_response_size = len(response.content)
old_user_count = get_user_model().objects.count()
# create an additional user and reload the page
get_user_model().objects.create_user(username=USER_NAME, email=USER_NAME + '@django-cms.org',
password=USER_NAME)
user_count = get_user_model().objects.count()
more_users_in_db = old_user_count < user_count
# we have more users
self.assertTrue(more_users_in_db, "New user was NOT created")
response = self.client.get(url)
new_response_size = len(response.content)
page_size_grown = old_response_size < new_response_size
# expect that the page size is influenced by the number of users in the system
self.assertTrue(page_size_grown, "Page size has not grown after user creation")
# the username appears only twice in the content
text = smart_str(response.content, response.charset)
foundcount = text.count(USER_NAME)
# 2 forms contain usernames as options
self.assertEqual(foundcount, 2,
"Username %s appeared %s times in response.content, expected 2 times" % (
USER_NAME, foundcount))
class AdminPageTreeTests(AdminTestsBase):
def test_move_node(self):
admin_user, staff = self._get_guys()
page_admin = self.admin_class
alpha = create_page('Alpha', 'nav_playground.html', 'en', published=True)
beta = create_page('Beta', TEMPLATE_INHERITANCE_MAGIC, 'en', published=True)
gamma = create_page('Gamma', TEMPLATE_INHERITANCE_MAGIC, 'en', published=True)
delta = create_page('Delta', TEMPLATE_INHERITANCE_MAGIC, 'en', published=True)
# Current structure:
# <root>
# ⊢ Alpha
# ⊢ Beta
# ⊢ Gamma
# ⊢ Delta
# Move Beta to be a child of Alpha
data = {
'id': beta.pk,
'position': 0,
'target': alpha.pk,
}
with self.login_user_context(admin_user):
request = self.get_request(post_data=data)
response = page_admin.move_page(request, page_id=beta.pk)
data = json.loads(response.content.decode('utf8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(data['status'], 200)
self.assertEqual(alpha.reload().get_descendants().count(), 1)
# Current structure:
# <root>
# ⊢ Alpha
# ⊢ Beta
# ⊢ Gamma
# ⊢ Delta
# Move Gamma to be a child of Beta
data = {
'id': gamma.pk,
'position': 0,
'target': beta.pk,
}
with self.login_user_context(admin_user):
request = self.get_request(post_data=data)
response = page_admin.move_page(request, page_id=gamma.pk)
data = json.loads(response.content.decode('utf8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(data['status'], 200)
self.assertEqual(alpha.reload().get_descendants().count(), 2)
self.assertEqual(beta.reload().get_descendants().count(), 1)
# Current structure:
# <root>
# ⊢ Alpha
# ⊢ Beta
# ⊢ Gamma
# ⊢ Delta
# Move Delta to be a child of Gamma
data = {
'id': delta.pk,
'position': 0,
'target': gamma.pk,
}
with self.login_user_context(admin_user):
request = self.get_request(post_data=data)
response = page_admin.move_page(request, page_id=delta.pk)
data = json.loads(response.content.decode('utf8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(data['status'], 200)
self.assertEqual(alpha.reload().get_descendants().count(), 3)
self.assertEqual(beta.reload().get_descendants().count(), 2)
self.assertEqual(gamma.reload().get_descendants().count(), 1)
# Current structure:
# <root>
# ⊢ Alpha
# ⊢ Beta
# ⊢ Gamma
# ⊢ Delta
# Move Beta to the root as node #1 (positions are 0-indexed)
data = {
'id': beta.pk,
'position': 1,
}
with self.login_user_context(admin_user):
request = self.get_request(post_data=data)
response = page_admin.move_page(request, page_id=beta.pk)
data = json.loads(response.content.decode('utf8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(data['status'], 200)
self.assertEqual(alpha.reload().get_descendants().count(), 0)
self.assertEqual(beta.reload().get_descendants().count(), 2)
self.assertEqual(gamma.reload().get_descendants().count(), 1)
# Current structure:
# <root>
# ⊢ Alpha
# ⊢ Beta
# ⊢ Gamma
# ⊢ Delta
# Move Beta to be a child of Alpha again
data = {
'id': beta.pk,
'position': 0,
'target': alpha.pk,
}
with self.login_user_context(admin_user):
request = self.get_request(post_data=data)
response = page_admin.move_page(request, page_id=beta.pk)
data = json.loads(response.content.decode('utf8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(data['status'], 200)
self.assertEqual(alpha.reload().get_descendants().count(), 3)
self.assertEqual(beta.reload().get_descendants().count(), 2)
self.assertEqual(gamma.reload().get_descendants().count(), 1)
# Current structure:
# <root>
# ⊢ Alpha
# ⊢ Beta
# ⊢ Gamma
# ⊢ Delta
# Move Gamma to the root as node #1 (positions are 0-indexed)
data = {
'id': gamma.pk,
'position': 1,
}
with self.login_user_context(admin_user):
request = self.get_request(post_data=data)
response = page_admin.move_page(request, page_id=gamma.pk)
data = json.loads(response.content.decode('utf8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(data['status'], 200)
self.assertEqual(alpha.reload().get_descendants().count(), 1)
self.assertEqual(beta.reload().get_descendants().count(), 0)
self.assertEqual(gamma.reload().get_descendants().count(), 1)
# Current structure:
# <root>
# ⊢ Alpha
# ⊢ Beta
# ⊢ Gamma
# ⊢ Delta
# Move Delta to the root as node #1 (positions are 0-indexed)
data = {
'id': delta.pk,
'position': 1,
}
with self.login_user_context(admin_user):
request = self.get_request(post_data=data)
response = page_admin.move_page(request, page_id=delta.pk)
data = json.loads(response.content.decode('utf8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(data['status'], 200)
self.assertEqual(alpha.reload().get_descendants().count(), 1)
self.assertEqual(beta.reload().get_descendants().count(), 0)
self.assertEqual(gamma.reload().get_descendants().count(), 0)
# Current structure:
# <root>
# ⊢ Alpha
# ⊢ Beta
# ⊢ Delta
# ⊢ Gamma
# Move Gamma to be a child of Delta
data = {
'id': gamma.pk,
'position': 1,
'target': delta.pk,
}
with self.login_user_context(admin_user):
request = self.get_request(post_data=data)
response = page_admin.move_page(request, page_id=gamma.pk)
data = json.loads(response.content.decode('utf8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(data['status'], 200)
self.assertEqual(alpha.reload().get_descendants().count(), 1)
self.assertEqual(beta.reload().get_descendants().count(), 0)
self.assertEqual(gamma.reload().get_descendants().count(), 0)
self.assertEqual(delta.reload().get_descendants().count(), 1)
# Final structure:
# <root>
# ⊢ Alpha
# ⊢ Beta
# ⊢ Delta
# ⊢ Gamma
| timgraham/django-cms | cms/tests/test_admin.py | Python | bsd-3-clause | 63,904 |
# Copyright (c) 2010-2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2010-2011 Advanced Micro Devices, Inc.
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Kevin Lim
from m5.objects import *
from Benchmarks import *
from m5.util import *
class CowIdeDisk(IdeDisk):
image = CowDiskImage(child=RawDiskImage(read_only=True),
read_only=False)
def childImage(self, ci):
self.image.child.image_file = ci
class MemBus(CoherentXBar):
badaddr_responder = BadAddr()
default = Self.badaddr_responder.pio
def makeLinuxAlphaSystem(mem_mode, mdesc = None, ruby = False):
class BaseTsunami(Tsunami):
ethernet = NSGigE(pci_bus=0, pci_dev=1, pci_func=0)
ide = IdeController(disks=[Parent.disk0, Parent.disk2],
pci_func=0, pci_dev=0, pci_bus=0)
self = LinuxAlphaSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.tsunami = BaseTsunami()
# Create the io bus to connect all device ports
self.iobus = NoncoherentXBar()
self.tsunami.attachIO(self.iobus)
self.tsunami.ide.pio = self.iobus.master
self.tsunami.ide.config = self.iobus.master
self.tsunami.ethernet.pio = self.iobus.master
self.tsunami.ethernet.config = self.iobus.master
if ruby:
# Store the dma devices for later connection to dma ruby ports.
# Append an underscore to dma_ports to avoid the SimObjectVector check.
self._dma_ports = [self.tsunami.ide.dma, self.tsunami.ethernet.dma]
else:
self.membus = MemBus()
# By default the bridge responds to all addresses above the I/O
# base address (including the PCI config space)
IO_address_space_base = 0x80000000000
self.bridge = Bridge(delay='50ns',
ranges = [AddrRange(IO_address_space_base, Addr.max)])
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.tsunami.ide.dma = self.iobus.slave
self.tsunami.ethernet.dma = self.iobus.slave
self.system_port = self.membus.slave
self.mem_ranges = [AddrRange(mdesc.mem())]
self.disk0 = CowIdeDisk(driveID='master')
self.disk2 = CowIdeDisk(driveID='master')
self.disk0.childImage(mdesc.disk())
self.disk2.childImage(disk('linux-bigswap2.img'))
self.simple_disk = SimpleDisk(disk=RawDiskImage(image_file = mdesc.disk(),
read_only = True))
self.intrctrl = IntrControl()
self.mem_mode = mem_mode
self.terminal = Terminal()
self.kernel = binary('vmlinux')
self.pal = binary('ts_osfpal')
self.console = binary('console')
self.boot_osflags = 'root=/dev/hda1 console=ttyS0'
return self
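# Illustrative usage (an addition, not part of the original file; the disk
# image name is a placeholder): a config script would typically call
#
#   test_sys = makeLinuxAlphaSystem('atomic', SysConfig(disk='linux.img'))
#
# and then add CPUs and memory controllers before instantiating the system.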
def makeSparcSystem(mem_mode, mdesc = None):
# Constants from iob.cc and uart8250.cc
iob_man_addr = 0x9800000000
uart_pio_size = 8
class CowMmDisk(MmDisk):
image = CowDiskImage(child=RawDiskImage(read_only=True),
read_only=False)
def childImage(self, ci):
self.image.child.image_file = ci
self = SparcSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.iobus = NoncoherentXBar()
self.membus = MemBus()
self.bridge = Bridge(delay='50ns')
self.t1000 = T1000()
self.t1000.attachOnChipIO(self.membus)
self.t1000.attachIO(self.iobus)
self.mem_ranges = [AddrRange(Addr('1MB'), size = '64MB'),
AddrRange(Addr('2GB'), size ='256MB')]
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.rom.port = self.membus.master
self.nvram.port = self.membus.master
self.hypervisor_desc.port = self.membus.master
self.partition_desc.port = self.membus.master
self.intrctrl = IntrControl()
self.disk0 = CowMmDisk()
self.disk0.childImage(disk('disk.s10hw2'))
self.disk0.pio = self.iobus.master
# The puart0 and hvuart are placed on the IO bus, so create ranges
# for them. The remaining IO range is rather fragmented, so poke
# holes for the iob and partition descriptors etc.
self.bridge.ranges = \
[
AddrRange(self.t1000.puart0.pio_addr,
self.t1000.puart0.pio_addr + uart_pio_size - 1),
AddrRange(self.disk0.pio_addr,
self.t1000.fake_jbi.pio_addr +
self.t1000.fake_jbi.pio_size - 1),
AddrRange(self.t1000.fake_clk.pio_addr,
iob_man_addr - 1),
AddrRange(self.t1000.fake_l2_1.pio_addr,
self.t1000.fake_ssi.pio_addr +
self.t1000.fake_ssi.pio_size - 1),
AddrRange(self.t1000.hvuart.pio_addr,
self.t1000.hvuart.pio_addr + uart_pio_size - 1)
]
self.reset_bin = binary('reset_new.bin')
self.hypervisor_bin = binary('q_new.bin')
self.openboot_bin = binary('openboot_new.bin')
self.nvram_bin = binary('nvram1')
self.hypervisor_desc_bin = binary('1up-hv.bin')
self.partition_desc_bin = binary('1up-md.bin')
self.system_port = self.membus.slave
return self
def makeArmSystem(mem_mode, machine_type, mdesc = None,
dtb_filename = None, bare_metal=False,
sdcard_image = "sdcard-1g-mxplayer.img"):
assert machine_type
if bare_metal:
self = ArmSystem()
else:
self = LinuxArmSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.iobus = NoncoherentXBar()
self.membus = MemBus()
self.membus.badaddr_responder.warn_access = "warn"
self.bridge = Bridge(delay='50ns')
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.mem_mode = mem_mode
if machine_type == "RealView_PBX":
self.realview = RealViewPBX()
elif machine_type == "RealView_EB":
self.realview = RealViewEB()
elif machine_type == "VExpress_ELT":
self.realview = VExpress_ELT()
elif machine_type == "VExpress_EMM":
self.realview = VExpress_EMM()
elif machine_type == "VExpress_EMM64":
self.realview = VExpress_EMM64()
else:
print "Unknown Machine Type"
sys.exit(1)
self.cf0 = CowIdeDisk(driveID='master')
self.cf2 = CowIdeDisk(driveID='master')
self.cf0.childImage(mdesc.disk())
self.cf2.childImage(disk(sdcard_image))
# Attach any PCI devices this platform supports
self.realview.attachPciDevices()
# default to an IDE controller rather than a CF one
try:
self.realview.ide.disks = [self.cf0, self.cf2]
except:
self.realview.cf_ctrl.disks = [self.cf0, self.cf2]
if bare_metal:
# EOT character on UART will end the simulation
self.realview.uart.end_on_eot = True
self.mem_ranges = [AddrRange(self.realview.mem_start_addr,
size = mdesc.mem())]
else:
if machine_type == "VExpress_EMM64":
self.kernel = binary('vmlinux-3.16-aarch64-vexpress-emm64-pcie')
elif machine_type == "VExpress_EMM":
self.kernel = binary('vmlinux-3.3-arm-vexpress-emm-pcie')
else:
self.kernel = binary('vmlinux.arm.smp.fb.2.6.38.8')
if dtb_filename:
self.dtb_filename = binary(dtb_filename)
self.machine_type = machine_type
# Ensure that writes to the UART actually go out early in the boot
boot_flags = 'earlyprintk=pl011,0x1c090000 console=ttyAMA0 ' + \
'lpj=19988480 norandmaps rw loglevel=8 ' + \
'mem=%s root=/dev/sda1' % mdesc.mem()
self.mem_ranges = []
size_remain = long(Addr(mdesc.mem()))
for region in self.realview._mem_regions:
if size_remain > long(region[1]):
self.mem_ranges.append(AddrRange(region[0], size=region[1]))
size_remain = size_remain - long(region[1])
else:
self.mem_ranges.append(AddrRange(region[0], size=size_remain))
size_remain = 0
break
warn("Memory size specified spans more than one region. Creating" \
" another memory controller for that range.")
if size_remain > 0:
fatal("The currently selected ARM platforms doesn't support" \
" the amount of DRAM you've selected. Please try" \
" another platform")
self.realview.setupBootLoader(self.membus, self, binary)
self.gic_cpu_addr = self.realview.gic.cpu_addr
self.flags_addr = self.realview.realview_io.pio_addr + 0x30
if mdesc.disk().lower().count('android'):
boot_flags += " init=/init "
self.boot_osflags = boot_flags
self.realview.attachOnChipIO(self.membus, self.bridge)
self.realview.attachIO(self.iobus)
self.intrctrl = IntrControl()
self.terminal = Terminal()
self.vncserver = VncServer()
self.system_port = self.membus.slave
return self
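# Illustrative usage (an addition, not part of the original file; the DTB
# name is a placeholder): a config script would typically call
#
#   test_sys = makeArmSystem('timing', 'VExpress_EMM', SysConfig(),
#                            dtb_filename='vexpress.dtb')
#
# and then attach CPUs, caches and memory controllers to test_sys.membus.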
def makeLinuxMipsSystem(mem_mode, mdesc = None):
class BaseMalta(Malta):
ethernet = NSGigE(pci_bus=0, pci_dev=1, pci_func=0)
ide = IdeController(disks=[Parent.disk0, Parent.disk2],
pci_func=0, pci_dev=0, pci_bus=0)
self = LinuxMipsSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.iobus = NoncoherentXBar()
self.membus = MemBus()
self.bridge = Bridge(delay='50ns')
self.mem_ranges = [AddrRange('1GB')]
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.disk0 = CowIdeDisk(driveID='master')
self.disk2 = CowIdeDisk(driveID='master')
self.disk0.childImage(mdesc.disk())
self.disk2.childImage(disk('linux-bigswap2.img'))
self.malta = BaseMalta()
self.malta.attachIO(self.iobus)
self.malta.ide.pio = self.iobus.master
self.malta.ide.config = self.iobus.master
self.malta.ide.dma = self.iobus.slave
self.malta.ethernet.pio = self.iobus.master
self.malta.ethernet.config = self.iobus.master
self.malta.ethernet.dma = self.iobus.slave
self.simple_disk = SimpleDisk(disk=RawDiskImage(image_file = mdesc.disk(),
read_only = True))
self.intrctrl = IntrControl()
self.mem_mode = mem_mode
self.terminal = Terminal()
self.kernel = binary('mips/vmlinux')
self.console = binary('mips/console')
self.boot_osflags = 'root=/dev/hda1 console=ttyS0'
self.system_port = self.membus.slave
return self
def x86IOAddress(port):
IO_address_space_base = 0x8000000000000000
return IO_address_space_base + port
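# Illustrative check (an addition, not part of the original file): legacy
# port-mapped I/O devices are addressed by offsetting the port number into
# the dedicated I/O address space, e.g. the COM1 UART at port 0x3f8:
assert x86IOAddress(0x3f8) == 0x80000000000003f8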
def connectX86ClassicSystem(x86_sys, numCPUs):
# Constants similar to x86_traits.hh
IO_address_space_base = 0x8000000000000000
pci_config_address_space_base = 0xc000000000000000
interrupts_address_space_base = 0xa000000000000000
APIC_range_size = 1 << 12;
x86_sys.membus = MemBus()
# North Bridge
x86_sys.iobus = NoncoherentXBar()
x86_sys.bridge = Bridge(delay='50ns')
x86_sys.bridge.master = x86_sys.iobus.slave
x86_sys.bridge.slave = x86_sys.membus.master
# Allow the bridge to pass through the IO APIC (two pages),
# everything in the IO address range up to the local APIC, and
# then the entire PCI address space and beyond
x86_sys.bridge.ranges = \
[
AddrRange(x86_sys.pc.south_bridge.io_apic.pio_addr,
x86_sys.pc.south_bridge.io_apic.pio_addr +
APIC_range_size - 1),
AddrRange(IO_address_space_base,
interrupts_address_space_base - 1),
AddrRange(pci_config_address_space_base,
Addr.max)
]
# Create a bridge from the IO bus to the memory bus to allow access to
# the local APIC (two pages)
x86_sys.apicbridge = Bridge(delay='50ns')
x86_sys.apicbridge.slave = x86_sys.iobus.master
x86_sys.apicbridge.master = x86_sys.membus.slave
x86_sys.apicbridge.ranges = [AddrRange(interrupts_address_space_base,
interrupts_address_space_base +
numCPUs * APIC_range_size
- 1)]
# connect the io bus
x86_sys.pc.attachIO(x86_sys.iobus)
x86_sys.system_port = x86_sys.membus.slave
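# Worked example (an addition for clarity, not part of the original file):
# with numCPUs == 2 the apicbridge above forwards exactly one
# APIC_range_size (4kB) window per CPU's local APIC, i.e.
#
#   AddrRange(0xa000000000000000, 0xa000000000001fff)
#
# while the main bridge passes the IO APIC pages, legacy port I/O below the
# interrupt space, and the whole PCI configuration space out to the IO bus.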
def connectX86RubySystem(x86_sys):
# North Bridge
x86_sys.iobus = NoncoherentXBar()
# add the ide to the list of dma devices that later need to attach to
# dma controllers
x86_sys._dma_ports = [x86_sys.pc.south_bridge.ide.dma]
x86_sys.pc.attachIO(x86_sys.iobus, x86_sys._dma_ports)
def makeX86System(mem_mode, numCPUs = 1, mdesc = None, self = None,
Ruby = False):
if self == None:
self = X86System()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.mem_mode = mem_mode
# Physical memory
# On the PC platform, the memory region 0xC0000000-0xFFFFFFFF is reserved
# for various devices. Hence, if the physical memory size is greater than
# 3GB, we need to split it into two parts.
excess_mem_size = \
convert.toMemorySize(mdesc.mem()) - convert.toMemorySize('3GB')
if excess_mem_size <= 0:
self.mem_ranges = [AddrRange(mdesc.mem())]
else:
warn("Physical memory size specified is %s which is greater than " \
"3GB. Twice the number of memory controllers would be " \
"created." % (mdesc.mem()))
self.mem_ranges = [AddrRange('3GB'),
AddrRange(Addr('4GB'), size = excess_mem_size)]
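# Worked example (an addition for clarity, not part of the original file):
# with mdesc.mem() == '4GB' the excess over 3GB is 1GB, so mem_ranges becomes
# [AddrRange('3GB'), AddrRange(Addr('4GB'), size='1GB')], leaving the
# 3GB-4GB hole free for the device/PCI region mentioned above.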
# Platform
self.pc = Pc()
# Create and connect the busses required by each memory system
if Ruby:
connectX86RubySystem(self)
else:
connectX86ClassicSystem(self, numCPUs)
self.intrctrl = IntrControl()
# Disks
disk0 = CowIdeDisk(driveID='master')
disk2 = CowIdeDisk(driveID='master')
disk0.childImage(mdesc.disk())
disk2.childImage(disk('linux-bigswap2.img'))
self.pc.south_bridge.ide.disks = [disk0, disk2]
# Add in a Bios information structure.
structures = [X86SMBiosBiosInformation()]
self.smbios_table.structures = structures
# Set up the Intel MP table
base_entries = []
ext_entries = []
for i in xrange(numCPUs):
bp = X86IntelMPProcessor(
local_apic_id = i,
local_apic_version = 0x14,
enable = True,
bootstrap = (i == 0))
base_entries.append(bp)
io_apic = X86IntelMPIOAPIC(
id = numCPUs,
version = 0x11,
enable = True,
address = 0xfec00000)
self.pc.south_bridge.io_apic.apic_id = io_apic.id
base_entries.append(io_apic)
isa_bus = X86IntelMPBus(bus_id = 0, bus_type='ISA')
base_entries.append(isa_bus)
pci_bus = X86IntelMPBus(bus_id = 1, bus_type='PCI')
base_entries.append(pci_bus)
connect_busses = X86IntelMPBusHierarchy(bus_id=0,
subtractive_decode=True, parent_bus=1)
ext_entries.append(connect_busses)
pci_dev4_inta = X86IntelMPIOIntAssignment(
interrupt_type = 'INT',
polarity = 'ConformPolarity',
trigger = 'ConformTrigger',
source_bus_id = 1,
source_bus_irq = 0 + (4 << 2),
dest_io_apic_id = io_apic.id,
dest_io_apic_intin = 16)
base_entries.append(pci_dev4_inta)
def assignISAInt(irq, apicPin):
assign_8259_to_apic = X86IntelMPIOIntAssignment(
interrupt_type = 'ExtInt',
polarity = 'ConformPolarity',
trigger = 'ConformTrigger',
source_bus_id = 0,
source_bus_irq = irq,
dest_io_apic_id = io_apic.id,
dest_io_apic_intin = 0)
base_entries.append(assign_8259_to_apic)
assign_to_apic = X86IntelMPIOIntAssignment(
interrupt_type = 'INT',
polarity = 'ConformPolarity',
trigger = 'ConformTrigger',
source_bus_id = 0,
source_bus_irq = irq,
dest_io_apic_id = io_apic.id,
dest_io_apic_intin = apicPin)
base_entries.append(assign_to_apic)
assignISAInt(0, 2)
assignISAInt(1, 1)
for i in range(3, 15):
assignISAInt(i, i)
self.intel_mp_table.base_entries = base_entries
self.intel_mp_table.ext_entries = ext_entries
def makeLinuxX86System(mem_mode, numCPUs = 1, mdesc = None,
Ruby = False):
self = LinuxX86System()
# Build up the x86 system and then specialize it for Linux
makeX86System(mem_mode, numCPUs, mdesc, self, Ruby)
# We assume below that there's at least 1MB of memory. We'll require 2MB
# just to avoid corner cases.
phys_mem_size = sum(map(lambda r: r.size(), self.mem_ranges))
assert(phys_mem_size >= 0x200000)
assert(len(self.mem_ranges) <= 2)
entries = \
[
# Mark the first 639kB of memory as available and the rest of the
# first megabyte as reserved
X86E820Entry(addr = 0, size = '639kB', range_type = 1),
X86E820Entry(addr = 0x9fc00, size = '385kB', range_type = 2),
# Mark the rest of physical memory as available
X86E820Entry(addr = 0x100000,
size = '%dB' % (self.mem_ranges[0].size() - 0x100000),
range_type = 1),
# Reserve the last 64kB of the 32-bit address space for the
# m5op interface
X86E820Entry(addr=0xFFFF0000, size='64kB', range_type=2),
]
# In case the physical memory is greater than 3GB, we split it into two
# parts and add a separate e820 entry for the second part. This entry
# starts at 0x100000000, which is the first address after the space
# reserved for devices.
if len(self.mem_ranges) == 2:
entries.append(X86E820Entry(addr = 0x100000000,
size = '%dB' % (self.mem_ranges[1].size()), range_type = 1))
self.e820_table.entries = entries
# Command line
self.boot_osflags = 'earlyprintk=ttyS0 console=ttyS0 lpj=7999923 ' + \
'root=/dev/hda1'
self.kernel = binary('x86_64-vmlinux-2.6.22.9')
return self
def makeDualRoot(full_system, testSystem, driveSystem, dumpfile):
self = Root(full_system = full_system)
self.testsys = testSystem
self.drivesys = driveSystem
self.etherlink = EtherLink()
if hasattr(testSystem, 'realview'):
self.etherlink.int0 = Parent.testsys.realview.ethernet.interface
self.etherlink.int1 = Parent.drivesys.realview.ethernet.interface
elif hasattr(testSystem, 'tsunami'):
self.etherlink.int0 = Parent.testsys.tsunami.ethernet.interface
self.etherlink.int1 = Parent.drivesys.tsunami.ethernet.interface
else:
fatal("Don't know how to connect these system together")
if dumpfile:
self.etherdump = EtherDump(file=dumpfile)
self.etherlink.dump = Parent.etherdump
return self
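# Illustrative usage (an addition, not part of the original file): dual-system
# (client/server) simulations combine two previously built systems, e.g.
#
#   root = makeDualRoot(True, test_sys, drive_sys, dumpfile=None)
#
# which links the two platforms' NICs through an EtherLink and optionally
# dumps the traffic to an EtherDump file.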
| lokeshjindal15/gem5_transform | configs/common/FSConfig.py | Python | bsd-3-clause | 21,365 |