repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64 6.51-99.9) | line_max (int64 15-997) | alpha_frac (float64 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
jlweand/DssVisualizer | app/plugins/datasource/elasticsearch/multiIncludeThroughput.py | 1 | 5393 | # Copyright (C) 2016 Jamie Acosta, Jennifer Weand, Juan Soto, Mark Eby, Mark Smith, Andres Olivas
#
# This file is part of DssVisualizer.
#
# DssVisualizer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DssVisualizer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DssVisualizer. If not, see <http://www.gnu.org/licenses/>.
from plugins.datasource.elasticsearch.annotations import Annotations
from plugins.datasource.elasticsearch.common import Common
from elasticsearch import Elasticsearch
from plugins.datasource.elasticsearch.selecting import Selecting
class MultiIncludeThroughput:
def __init__(self):
self.esIndex = Common().getIndexName()
self.multiIncludeThroughputDocType = "multiincludethroughput"
self.resultSize = Common().getSizeToReturn()
def importMultiIncludeThroughputData(self, jsonObjects):
es = Elasticsearch()
es.indices.create(index=self.esIndex, ignore=400)
insertedCount = 0
for json in jsonObjects:
result = es.index(index=self.esIndex, doc_type=self.multiIncludeThroughputDocType, body=json)
insertedCount += result["_shards"]["successful"]
return insertedCount
# select data by date range of the 'start' column
def selectMultiIncludeThroughputData(self, startDate, endDate, techNames, eventNames, eventTechNames):
select = Selecting().generateSelectQuery(startDate, endDate, techNames, eventNames, eventTechNames, False, True)
data = Elasticsearch().search(index=self.esIndex, doc_type=self.multiIncludeThroughputDocType, size=self.resultSize, body=select)
return Selecting().fixAllTheData(data)
# select single data point
def selectMultiIncludeThroughputDataById(self, dataId):
data = Elasticsearch().get(index=self.esIndex, doc_type=self.multiIncludeThroughputDocType, id=dataId)
return Selecting().fixOneData(data)
# adds or edits a fixedData record for this data point
def modifyFixedMultiIncludeThroughputData(self, dataId, traffic_xy_id, className, x, y, isDeleted):
updateFixed = {"doc": {"fixedData": {"traffic_xy_id": traffic_xy_id, "className": className, "x": x, "y": y, "isDeleted": isDeleted}}}
result = Elasticsearch().update(index=self.esIndex, doc_type=self.multiIncludeThroughputDocType, body=updateFixed, id = dataId)
return Common().getModfiedCount(result)
# delete the fixedData
def deleteFixedMultiIncludeThroughputData(self, dataId):
deleteFixed = {"script" : "ctx._source.remove(\"fixedData\")"}
result = Elasticsearch().update(index=self.esIndex, doc_type=self.multiIncludeThroughputDocType, body=deleteFixed, id = dataId)
return Common().getModfiedCount(result)
# add or edit an annotation to the object. This will add a single 'annotation' attribute to the object.
def modifyAnnotationMultiIncludeThroughput(self, dataId, annotationText):
return Annotations().modifyAnnotation(self.multiIncludeThroughputDocType, dataId, annotationText)
# add an annotation to an array of annotations for the dataId
def addAnnotationToArrayMultiIncludeThroughput(self, dataId, annotationText):
return Annotations().addAnnotationToArray(self.multiIncludeThroughputDocType, dataId, annotationText)
# edit an annotation in the array of annotations.
def editAnnotationInArrayMultiIncludeThroughput(self, dataId, oldAnnotationText, newAnnotationText):
return Annotations().editAnnotationInArray(self.multiIncludeThroughputDocType, dataId, oldAnnotationText, newAnnotationText)
# delete an annotation from array for the dataId
def deleteAnnotationFromArrayMultiIncludeThroughput(self, dataId, annotationText):
return Annotations().deleteAnnotationFromArray(self.multiIncludeThroughputDocType, dataId, annotationText)
# deletes all annotations for the dataId
def deleteAllAnnotationsForMultiIncludeThroughput(self, dataId):
return Annotations().deleteAllAnnotationsForData(self.multiIncludeThroughputDocType, dataId)
# add an annotation to the timeline, not a datapoint
def addAnnotationToMultiIncludeThroughputTimeline(self, multiIncludeThroughput, annotationText):
return Annotations().addAnnotationToTimeline(self.multiIncludeThroughputDocType, multiIncludeThroughput, annotationText)
# def getDistinctTechNamesForEvents(self, eventNames):
# collection = self.getMultiIncludeThroughputCollection()
# return TechAndEventNames().getDistinctTechNamesForEvents(collection, eventNames)
#
# def getDistinctEventNames(self):
# collection = self.getMultiIncludeThroughputCollection()
# return TechAndEventNames().getDistinctEventNames(collection)
#
# def getDistinctTechAndEventNames(self):
# collection = self.getMultiIncludeThroughputCollection()
# return TechAndEventNames().getDistinctTechAndEventNames(collection)
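# Minimal usage sketch (assumes a local Elasticsearch instance; the sample
# documents and the date range below are placeholders for illustration only):
if __name__ == "__main__":
    throughput = MultiIncludeThroughput()
    inserted = throughput.importMultiIncludeThroughputData(
        [{"start": "2016-06-01T00:00:00", "className": "demo", "x": 1, "y": 10},
         {"start": "2016-06-02T00:00:00", "className": "demo", "x": 2, "y": 20}])
    print("inserted %d documents" % inserted)
    # select everything in June 2016, with no tech/event name filtering
    results = throughput.selectMultiIncludeThroughputData(
        "2016-06-01T00:00:00", "2016-06-30T23:59:59", [], [], [])
    print(results)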
| gpl-3.0 | 8,172,515,192,321,630,000 | 54.597938 | 142 | 0.76043 | false |
pydanny/dj-stripe | djstripe/contrib/rest_framework/views.py | 1 | 2877 | """
.. module:: dj-stripe.contrib.rest_framework.views.
:synopsis: Views for the dj-stripe REST API.
.. moduleauthor:: Philippe Luickx (@philippeluickx)
"""
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from ...models import Customer
from ...settings import CANCELLATION_AT_PERIOD_END, subscriber_request_callback
from .serializers import CreateSubscriptionSerializer, SubscriptionSerializer
class SubscriptionRestView(APIView):
"""API Endpoints for the Subscription object."""
permission_classes = (IsAuthenticated,)
def get(self, request, **kwargs):
"""
Return the customer's valid subscriptions.
Returns with status code 200.
"""
customer, _created = Customer.get_or_create(
subscriber=subscriber_request_callback(self.request)
)
serializer = SubscriptionSerializer(customer.subscription)
return Response(serializer.data)
def post(self, request, **kwargs):
"""
Create a new current subscription for the user.
Returns with status code 201.
"""
serializer = CreateSubscriptionSerializer(data=request.data)
if serializer.is_valid():
try:
customer, _created = Customer.get_or_create(
subscriber=subscriber_request_callback(self.request)
)
customer.add_card(serializer.data["stripe_token"])
charge_immediately = serializer.data.get("charge_immediately")
if charge_immediately is None:
charge_immediately = True
customer.subscribe(serializer.data["plan"], charge_immediately)
return Response(serializer.data, status=status.HTTP_201_CREATED)
except Exception:
# TODO: Better error messages
return Response(
"Something went wrong processing the payment.",
status=status.HTTP_400_BAD_REQUEST,
)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, **kwargs):
"""
Mark the customers current subscription as canceled.
Returns with status code 204.
"""
try:
customer, _created = Customer.get_or_create(
subscriber=subscriber_request_callback(self.request)
)
customer.subscription.cancel(at_period_end=CANCELLATION_AT_PERIOD_END)
return Response(status=status.HTTP_204_NO_CONTENT)
except Exception:
return Response(
"Something went wrong cancelling the subscription.",
status=status.HTTP_400_BAD_REQUEST,
)
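# Minimal usage sketch (assumptions: Django settings and dj-stripe are
# configured; "tok_visa" and "monthly" are placeholder token/plan values,
# and some_user stands for any authenticated user):
#
#     from rest_framework.test import APIRequestFactory, force_authenticate
#
#     factory = APIRequestFactory()
#     request = factory.post(
#         "/subscription/", {"stripe_token": "tok_visa", "plan": "monthly"})
#     force_authenticate(request, user=some_user)
#     response = SubscriptionRestView.as_view()(request)
#     assert response.status_code == 201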
| bsd-3-clause | 8,368,867,647,684,724,000 | 33.25 | 82 | 0.63017 | false |
eloylp/scirocco-pyclient | test/unit/requestadapter_test.py | 1 | 13891 | import copy
import json
import os
import unittest
from urllib3.request import urlencode
from sciroccoclient.exceptions import SciroccoInitParamsError
from sciroccoclient.http.requestadapter import RequestsAdapter, RequestAdapterResponse, RequestManagerResponseHandler, \
RequestManagerDataResponseHandler, RequestManagerContentTypeDetector
from sciroccoclient.metadata import MetaDataDescriptor, MetaData, MetaDataHTTPHeadersFilter, \
MetaDataHydrator
from test.unit.mocks import RequestManagerMock, Bunch
class RequestsAdapterTest(unittest.TestCase):
def setUp(self):
metadata_http_headers_filter = MetaDataHTTPHeadersFilter(MetaDataDescriptor(MetaData()))
self.request_adapter = RequestsAdapter(RequestManagerMock(),
RequestManagerResponseHandler(metadata_http_headers_filter,
MetaDataHydrator(),
RequestManagerDataResponseHandler()),
MetaDataDescriptor(MetaData()),
RequestManagerContentTypeDetector())
self.request_adapter_without_runtime = copy.deepcopy(self.request_adapter)
self.request_adapter.api_url = 'https://dds.sandboxwebs.com'
self.request_adapter.node_id = 'af123'
self.request_adapter.auth_token = 'tok'
def test_from_header_fixed_property(self):
self.assertEqual('Scirocco-Node-Source', self.request_adapter.meta_data_http.get_http_header_by_field_name('node_source'))
def test_node_id_mandatory_property(self):
self.assertEqual('af123', self.request_adapter.node_id)
def test_api_token_mandatory_property(self):
self.assertEqual('tok', self.request_adapter.auth_token)
def test_api_url_mandatory_property(self):
self.assertEqual('https://dds.sandboxwebs.com', self.request_adapter.api_url)
def test_property_api_url_exits(self):
self.assertTrue(hasattr(self.request_adapter, "api_url"))
def test_property_node_id_exits(self):
self.assertTrue(hasattr(self.request_adapter, "node_id"))
def test_property_auth_token_exits(self):
self.assertTrue(hasattr(self.request_adapter, "auth_token"))
def test_runtime_properties_are_unsetted(self):
self.assertIsNone(self.request_adapter_without_runtime.api_url)
self.assertIsNone(self.request_adapter_without_runtime.node_id)
self.assertIsNone(self.request_adapter_without_runtime.auth_token)
def test_exec_without_runtime_node_id_fails(self):
self.request_adapter_without_runtime.api_url = 'url'
self.request_adapter_without_runtime.auth_token = '45345'
self.assertRaises(SciroccoInitParamsError, self.request_adapter_without_runtime.request, 'GET', '/resource')
def test_exec_without_runtime_api_url_fails(self):
self.request_adapter_without_runtime.auth_token = '45345'
self.request_adapter_without_runtime.node_id = '45345'
self.assertRaises(SciroccoInitParamsError, self.request_adapter_without_runtime.request, 'GET', '/resource')
def test_exec_without_runtime_auth_token_fails(self):
self.request_adapter_without_runtime.api_url = 'url'
self.request_adapter_without_runtime.node_id = '45345'
self.assertRaises(SciroccoInitParamsError, self.request_adapter_without_runtime.request, 'GET', '/resource')
def test_get_uri(self):
root = 'https://dds.sandboxwebs.com'
self.assertEqual(root + '/resource', self.request_adapter.get_uri('/resource'))
self.assertEqual(root + '/resource/subresource', self.request_adapter.get_uri('/resource/subresource/'))
def test_get_headers_fixed_auth_header(self):
headers = self.request_adapter.get_fixed_headers()
self.assertEqual('tok', headers['Authorization'])
def test_get_headers_fixed_from_header(self):
headers = self.request_adapter.get_fixed_headers()
self.assertEqual('af123', headers['Scirocco-Node-Source'])
def test_request_added_headers_are_present_in_request(self):
headers_fixture = {"headerExtra": "extraextra!"}
data_fixture = {"queryparam1": 23, "queryparam2": 34}
res = self.request_adapter.request('GET', data=data_fixture, headers=headers_fixture)
self.assertEqual(res.http_headers['headerExtra'], 'extraextra!')
def test_request_method_in_request_is_uppercased(self):
headers_fixture = {"headerExtra": "extraextra!"}
data_fixture = {"queryparam1": 23, "queryparam2": 34}
res = self.request_adapter.request('get', data=data_fixture, headers=headers_fixture)
self.assertEqual('GET', res.http_headers['method'])
def test_request_get_method_data_is_same_as_url_params(self):
data_fixture = {"queryparam1": 23, "queryparam2": 34}
res = self.request_adapter.request('GET', '/resource', data_fixture)
self.assertEqual(res.http_headers['url'],
''.join([self.request_adapter.api_url, '/resource', '?', urlencode(data_fixture)]))
def test_request_delete_method_data_is_same_as_url_params(self):
data_fixture = {"queryparam1": 23, "queryparam2": 34}
res = self.request_adapter.request('DELETE', '/resource', data_fixture)
self.assertEqual(res.http_headers['url'],
''.join([self.request_adapter.api_url, '/resource', '?', urlencode(data_fixture)]))
def test_request_post_method_data_is_same_as_body(self):
data_fixture = {"name": "eloy", "test": True}
res = self.request_adapter.request('POST', '/resource', data_fixture.copy())
self.assertEqual(res.payload['name'], 'eloy')
self.assertTrue(res.payload['test'])
def test_request_put_method_data_is_same_as_body(self):
data_fixture = {"name": "eloy", "test": True}
res = self.request_adapter.request('PUT', '/resource', data_fixture)
self.assertEqual(res.payload['name'], 'eloy')
self.assertTrue(res.payload['test'])
def test_request_patch_method_data_is_same_as_body(self):
data_fixture = {"name": "eloy", "test": True}
res = self.request_adapter.request('PATCH', '/resource', data_fixture)
self.assertEqual(res.payload['name'], 'eloy')
self.assertTrue(res.payload['test'])
class RequestManagerResponseHandlerTest(unittest.TestCase):
def setUp(self):
self.metadata_headers_descriptor = MetaDataDescriptor(MetaData())
metadata_http_headers_filter = MetaDataHTTPHeadersFilter(self.metadata_headers_descriptor)
metadata_hydrator = MetaDataHydrator()
data_treatment = RequestManagerDataResponseHandler()
self.response_handler = RequestManagerResponseHandler(metadata_http_headers_filter, metadata_hydrator,
data_treatment)
def test_method_handle_exists(self):
self.assertTrue("handle" in dir(self.response_handler))
def test_return_type_request_adapter_response(self):
response = Bunch(
headers={
self.metadata_headers_descriptor.get_http_header_by_field_name('node_source'): "af123",
"Cookie": "adasdsa"
},
data="asdaasdaasd".encode(),
status=201
)
res = self.response_handler.handle(response)
self.assertIsInstance(res, RequestAdapterResponse)
class RequestAdapterDataResponseHandlerTest(unittest.TestCase):
def setUp(self):
self.data_treat = RequestManagerDataResponseHandler()
def test_method_treat_data_exists(self):
self.assertTrue("treat" in dir(self.data_treat))
def test_treat_data_converts_json(self):
data = '{"name": "test"}'.encode()
res = self.data_treat.treat(data)
self.assertIsInstance(res, dict)
self.assertDictEqual(res, json.loads(data.decode()))
def test_treat_data_plain_text_accept(self):
data = 'string'.encode()
res = self.data_treat.treat(data)
self.assertIsInstance(res, str)
self.assertEqual(res, data.decode())
def test_treat_data_binary(self):
with open(os.path.join(os.path.dirname(__file__), '..', 'fixtures', 'tux.pdf'), 'rb') as f:
data = f.read()
res = self.data_treat.treat(data)
self.assertIsInstance(res, bytes)
class RequestAdapterContentTypeDetectorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'fixtures', 'tux.pdf'), 'rb') as f:
cls.bin_fixture = f.read()
def setUp(self):
self.req_adapter_content_type = RequestManagerContentTypeDetector()
def test_detect_from_body_exists(self):
self.assertTrue("detect_from_body" in dir(self.req_adapter_content_type))
def test_check_for_string_exists(self):
self.assertTrue("check_for_string" in dir(self.req_adapter_content_type))
def test_check_for_object_exists(self):
self.assertTrue("check_for_object" in dir(self.req_adapter_content_type))
def test_check_for_binary_exists(self):
self.assertTrue("check_for_binary" in dir(self.req_adapter_content_type))
def test_detect_from_body_only_accepts_one_param(self):
self.assertRaises(TypeError, self.req_adapter_content_type.detect_from_body, "sdsd", "sdsd")
def test_check_for_string_only_accepts_one_param(self):
self.assertRaises(TypeError, self.req_adapter_content_type.detect_from_body, "sdsd", "sdsd")
def test_check_for_object_only_accepts_one_param(self):
self.assertRaises(TypeError, self.req_adapter_content_type.detect_from_body, "sdsd", "sdsd")
def test_check_for_binary_only_accepts_one_param(self):
self.assertRaises(TypeError, self.req_adapter_content_type.detect_from_body, "sdsd", "sdsd")
def test_detect_from_body_since_body_is_binary(self):
res = self.req_adapter_content_type.detect_from_body(self.bin_fixture)
self.assertEqual(res, 'application/octet-stream')
def test_detect_from_body_since_body_is_object(self):
body = {"name": "body", "type": "json object"}
res = self.req_adapter_content_type.detect_from_body(body)
self.assertEqual(res, 'application/json')
def test_detect_from_body_since_body_is_string(self):
body = "this is a body string."
res = self.req_adapter_content_type.detect_from_body(body)
self.assertEqual(res, 'text/plain')
def test_check_for_string(self):
fixture = "stringggggggggggggg"
res = self.req_adapter_content_type.check_for_string(fixture)
self.assertTrue(res)
fixture = {}
res = self.req_adapter_content_type.check_for_string(fixture)
self.assertFalse(res)
res = self.req_adapter_content_type.check_for_string(self.bin_fixture)
self.assertFalse(res)
def test_check_for_object(self):
fixture = "stringggggggggggggg"
res = self.req_adapter_content_type.check_for_object(fixture)
self.assertFalse(res)
fixture = {"name": "test"}
res = self.req_adapter_content_type.check_for_object(fixture)
self.assertTrue(res)
fixture = '{"name": "test"}'
res = self.req_adapter_content_type.check_for_object(fixture)
self.assertTrue(res)
res = self.req_adapter_content_type.check_for_object(self.bin_fixture)
self.assertFalse(res)
def test_check_for_bin(self):
fixture = "stringggggggggggggg"
res = self.req_adapter_content_type.check_for_binary(fixture)
self.assertFalse(res)
fixture = {}
res = self.req_adapter_content_type.check_for_binary(fixture)
self.assertFalse(res)
res = self.req_adapter_content_type.check_for_binary(self.bin_fixture)
self.assertTrue(res)
class RequestResponseTest(unittest.TestCase):
def setUp(self):
self.cli_resp = RequestAdapterResponse()
def test_attribute_http_headers_exist(self):
self.assertTrue(hasattr(self.cli_resp, 'http_headers'))
def test_attribute_http_status_exist(self):
self.assertTrue(hasattr(self.cli_resp, 'http_status'))
def test_attribute_metadata_exist(self):
self.assertTrue(hasattr(self.cli_resp, 'metadata'))
def test_attribute_payload_exist(self):
self.assertTrue(hasattr(self.cli_resp, 'payload'))
def test_attribute_metadata_initial_value_is_none(self):
self.assertIsNone(self.cli_resp.metadata)
def test_attribute_payload_initial_value_is_none(self):
self.assertIsNone(self.cli_resp.payload)
def test_attribute_http_headers_initial_value_is_none(self):
self.assertIsNone(self.cli_resp.http_headers)
def test_attribute_http_status_initial_value_is_none(self):
self.assertIsNone(self.cli_resp.http_status)
def test_setter_payload_not_modifies_output(self):
data = {"field1": "value1", "field2": "value2"}
self.cli_resp.payload = data
self.assertDictEqual(data, self.cli_resp.payload)
def test_setter_metadata_not_modifies_output(self):
data = {"field1": "value1", "field2": "value2"}
self.cli_resp.metadata = data
self.assertDictEqual(data, self.cli_resp.metadata)
def test_setter_http_headers_not_modifies_output(self):
data = {"field1": "value1", "field2": "value2"}
self.cli_resp.http_headers = data
self.assertDictEqual(data, self.cli_resp.http_headers)
def test_setter_http_status_not_modifies_output(self):
data = 201
self.cli_resp.http_status = data
self.assertEqual(data, self.cli_resp.http_status)
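# Allow this test module to be run directly, matching the other test modules:
if __name__ == '__main__':
    unittest.main()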
| agpl-3.0 | -8,494,715,706,143,805,000 | 41.221884 | 130 | 0.664243 | false |
zachpodbielniak/PodNet | Linux/Python/PodNet.py | 1 | 1132 | '''
____ _ _ _ _ _ ____ ___
| _ \ ___ __| | \ | | ___| |_ / \ | _ \_ _|
| |_) / _ \ / _` | \| |/ _ \ __| / _ \ | |_) | |
| __/ (_) | (_| | |\ | __/ |_ / ___ \| __/| |
|_| \___/ \__,_|_| \_|\___|\__| /_/ \_\_| |___|
File: PodNet.py
Author: Zach Podbielniak
Last Update: 11/24/2017
Overview: This file forwards the PodNet C API to Python, as
well as exposing all other Python-related utilities.
This file is part of the PodNet API and comes with no warranty;
use at your own discretion.
'''
from CAlgorithms import *
from CAtomTable import *
from CClock import *
from CError import *
from CSystem import *
from CEvent import *
from CCriticalSection import *
from CHandle import *
from CMutex import *
from CSemaphore import *
from CSpinLock import *
from CThread import *
from CPromise import *
from CFuture import *
from CCallOnce import *
from CLog import *
from CFile import *
from CShapes2D import *
from CCoordinates import *
from CLua import *
from CGpio import *
from CIpv4 import *
from PyScripting import *
from PyString import *
| gpl-3.0 | -1,405,519,365,035,746,000 | 21.64 | 72 | 0.577739 | false |
jul/dsat | mics_utils/universal_testing_client.py | 1 | 2218 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from time import time, sleep, asctime as _asctime
import sched
from random import randint
import logging
import logging.handlers
import sys,os
import readline
from readline import write_history_file, read_history_file
import zmq
from simplejson import dumps, load, loads
from dsat.message import send_vector, fast_parse_vector, extract_vector_from_dict
from dsat.state import _f
import dsat
print dsat.__version__
_to = sys.argv[1]
_mode = sys.argv[2]
_stable = sys.argv[3] == "bind"
_what = None if len(sys.argv) <= 4 else sys.argv[4]
my_logger = logging.getLogger('Logger')
my_logger.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
my_logger.addHandler(handler)
def D(msg):
my_logger.warning("%r:%s" % (os.getpid(), msg))
HIST = ".hist_zmq"
if not os.path.exists(HIST):
write_history_file(HIST)
read_history_file(HIST)
### init phase: load its parameters
context = zmq.Context()
client = context.socket(getattr( zmq, _mode))
sleep(1)
_boc = _stable and "bind" or "connect"
_cnx_mode = getattr(client, _boc )
_cnx_mode(_to)
if _mode == "SUB":
client.setsockopt(zmq.SUBSCRIBE, '')
print "USBSRCIRINB ALL"
sleep(1)
print "address: %r" % _to
print "PATTERN: %r" % _mode
print _boc
print "message template is: %s" % dumps(extract_vector_from_dict({}), indent=4)
abort = False
recv = False
message=_what
while message and not abort:
if "q" == message:
break
if "r" == _what:
recv=True
elif _what:
message = _what
abort = True
else:
message = "".join(iter(lambda :raw_input("%s >" % _to), "ç"))
try:
if recv:
cpt = 0
while True:
print "waiting ..."
print fast_parse_vector(client)
print "RECEVIED"
print (" " * cpt ) + [ "\\", "-" , "/" , "|" ][cpt%4]
cpt += 1
else:
print("SENT %s" % loads(message))
print "\n"
print client.socket_type
send_vector(client, loads(message))
except Exception as e:
print(repr(e))
D("sent %r" % message)
write_history_file(HIST)
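# Example invocations (sketches; the address below is a placeholder):
#   python universal_testing_client.py tcp://127.0.0.1:5555 PUSH connect '{"foo": 1}'
#   python universal_testing_client.py tcp://127.0.0.1:5555 SUB bind r
# argv[1] is the zmq address, argv[2] the socket type (PUSH, SUB, ...),
# argv[3] is "bind" or "connect", and the optional argv[4] is either a JSON
# message to send or "r" to loop receiving vectors.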
| bsd-2-clause | 6,014,448,464,119,381,000 | 23.362637 | 81 | 0.608029 | false |
Grumbel/rfactorlcd | tests/test_ac_state.py | 1 | 1625 | # rFactor Remote LCD
# Copyright (C) 2014 Ingo Ruhnke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import struct
import os
from rfactorlcd.ac_state import HandshakeResponse, RTLap, RTCarInfo
datadir = os.path.join(os.path.dirname(__file__), "data")
with open(os.path.join(datadir, "ac-hand.log"), "rb") as fin:
handshake_response_data = fin.read()
with open(os.path.join(datadir, "ac-1.log"), "rb") as fin:
car_data = fin.read()
with open(os.path.join(datadir, "ac-2.log"), "rb") as fin:
lap_data = fin.read()
class AssettoCorsaStateTestCase(unittest.TestCase):
def test_handshake_parsing(self):
data = HandshakeResponse(handshake_response_data)
print data
def test_lap_parsing(self):
print len(lap_data)
lapinfo = RTLap(lap_data)
print lapinfo
def test_carinfo_parsing(self):
print len(car_data)
car = RTCarInfo(car_data)
print car
if __name__ == '__main__':
unittest.main()
# EOF #
| gpl-3.0 | -4,213,333,178,204,055,000 | 28.545455 | 71 | 0.693538 | false |
NeCTAR-RC/swift | test/unit/common/middleware/test_dlo.py | 1 | 42593 | #-*- coding:utf-8 -*-
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import hashlib
import json
import mock
import tempfile
import time
import unittest
from swift.common import exceptions, swob
from swift.common.middleware import dlo
from test.unit.common.middleware.helpers import FakeSwift
from textwrap import dedent
LIMIT = 'swift.common.middleware.dlo.CONTAINER_LISTING_LIMIT'
def md5hex(s):
return hashlib.md5(s).hexdigest()
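# Illustrative helper only (the tests below inline the same computation):
# a DLO's etag is the md5 of the concatenated segment etags.
def dlo_manifest_etag(segment_bodies):
    return md5hex("".join(md5hex(body) for body in segment_bodies))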
class DloTestCase(unittest.TestCase):
def call_dlo(self, req, app=None, expect_exception=False):
if app is None:
app = self.dlo
req.headers.setdefault("User-Agent", "Soap Opera")
status = [None]
headers = [None]
def start_response(s, h, ei=None):
status[0] = s
headers[0] = h
body_iter = app(req.environ, start_response)
body = ''
caught_exc = None
try:
for chunk in body_iter:
body += chunk
except Exception as exc:
if expect_exception:
caught_exc = exc
else:
raise
if expect_exception:
return status[0], headers[0], body, caught_exc
else:
return status[0], headers[0], body
def setUp(self):
self.app = FakeSwift()
self.dlo = dlo.filter_factory({
# don't slow down tests with rate limiting
'rate_limit_after_segment': '1000000',
})(self.app)
self.app.register(
'GET', '/v1/AUTH_test/c/seg_01',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("aaaaa")},
'aaaaa')
self.app.register(
'GET', '/v1/AUTH_test/c/seg_02',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("bbbbb")},
'bbbbb')
self.app.register(
'GET', '/v1/AUTH_test/c/seg_03',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("ccccc")},
'ccccc')
self.app.register(
'GET', '/v1/AUTH_test/c/seg_04',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("ddddd")},
'ddddd')
self.app.register(
'GET', '/v1/AUTH_test/c/seg_05',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("eeeee")},
'eeeee')
# an unrelated object (not seg*) to test the prefix matching
self.app.register(
'GET', '/v1/AUTH_test/c/catpicture.jpg',
swob.HTTPOk, {'Content-Length': '9',
'Etag': md5hex("meow meow meow meow")},
'meow meow meow meow')
self.app.register(
'GET', '/v1/AUTH_test/mancon/manifest',
swob.HTTPOk, {'Content-Length': '17', 'Etag': 'manifest-etag',
'X-Object-Manifest': 'c/seg'},
'manifest-contents')
lm = '2013-11-22T02:42:13.781760'
ct = 'application/octet-stream'
segs = [{"hash": md5hex("aaaaa"), "bytes": 5,
"name": "seg_01", "last_modified": lm, "content_type": ct},
{"hash": md5hex("bbbbb"), "bytes": 5,
"name": "seg_02", "last_modified": lm, "content_type": ct},
{"hash": md5hex("ccccc"), "bytes": 5,
"name": "seg_03", "last_modified": lm, "content_type": ct},
{"hash": md5hex("ddddd"), "bytes": 5,
"name": "seg_04", "last_modified": lm, "content_type": ct},
{"hash": md5hex("eeeee"), "bytes": 5,
"name": "seg_05", "last_modified": lm, "content_type": ct}]
full_container_listing = segs + [{"hash": "cats-etag", "bytes": 9,
"name": "catpicture.jpg",
"last_modified": lm,
"content_type": "application/png"}]
self.app.register(
'GET', '/v1/AUTH_test/c?format=json',
swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'},
json.dumps(full_container_listing))
self.app.register(
'GET', '/v1/AUTH_test/c?format=json&prefix=seg',
swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'},
json.dumps(segs))
# This is to let us test multi-page container listings; we use the
# trailing underscore to send small (pagesize=3) listings.
#
# If you're testing against this, be sure to mock out
# CONTAINER_LISTING_LIMIT to 3 in your test.
self.app.register(
'GET', '/v1/AUTH_test/mancon/manifest-many-segments',
swob.HTTPOk, {'Content-Length': '7', 'Etag': 'etag-manyseg',
'X-Object-Manifest': 'c/seg_'},
'manyseg')
self.app.register(
'GET', '/v1/AUTH_test/c?format=json&prefix=seg_',
swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'},
json.dumps(segs[:3]))
self.app.register(
'GET', '/v1/AUTH_test/c?format=json&prefix=seg_&marker=seg_03',
swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'},
json.dumps(segs[3:]))
# Here's a manifest with 0 segments
self.app.register(
'GET', '/v1/AUTH_test/mancon/manifest-no-segments',
swob.HTTPOk, {'Content-Length': '7', 'Etag': 'noseg',
'X-Object-Manifest': 'c/noseg_'},
'noseg')
self.app.register(
'GET', '/v1/AUTH_test/c?format=json&prefix=noseg_',
swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'},
json.dumps([]))
class TestDloPutManifest(DloTestCase):
def setUp(self):
super(TestDloPutManifest, self).setUp()
self.app.register(
'PUT', '/v1/AUTH_test/c/m',
swob.HTTPCreated, {}, None)
def test_validating_x_object_manifest(self):
exp_okay = ["c/o",
"c/obj/with/slashes",
"c/obj/with/trailing/slash/",
"c/obj/with//multiple///slashes////adjacent"]
exp_bad = ["",
"/leading/slash",
"double//slash",
"container-only",
"whole-container/",
"c/o?short=querystring",
"c/o?has=a&long-query=string"]
got_okay = []
got_bad = []
for val in (exp_okay + exp_bad):
req = swob.Request.blank("/v1/AUTH_test/c/m",
environ={'REQUEST_METHOD': 'PUT'},
headers={"X-Object-Manifest": val})
status, _, _ = self.call_dlo(req)
if status.startswith("201"):
got_okay.append(val)
else:
got_bad.append(val)
self.assertEqual(exp_okay, got_okay)
self.assertEqual(exp_bad, got_bad)
def test_validation_watches_manifests_with_slashes(self):
self.app.register(
'PUT', '/v1/AUTH_test/con/w/x/y/z',
swob.HTTPCreated, {}, None)
req = swob.Request.blank(
"/v1/AUTH_test/con/w/x/y/z", environ={'REQUEST_METHOD': 'PUT'},
headers={"X-Object-Manifest": 'good/value'})
status, _, _ = self.call_dlo(req)
self.assertEqual(status, "201 Created")
req = swob.Request.blank(
"/v1/AUTH_test/con/w/x/y/z", environ={'REQUEST_METHOD': 'PUT'},
headers={"X-Object-Manifest": '/badvalue'})
status, _, _ = self.call_dlo(req)
self.assertEqual(status, "400 Bad Request")
def test_validation_ignores_containers(self):
self.app.register(
'PUT', '/v1/a/c',
swob.HTTPAccepted, {}, None)
req = swob.Request.blank(
"/v1/a/c", environ={'REQUEST_METHOD': 'PUT'},
headers={"X-Object-Manifest": "/superbogus/?wrong=in&every=way"})
status, _, _ = self.call_dlo(req)
self.assertEqual(status, "202 Accepted")
def test_validation_ignores_accounts(self):
self.app.register(
'PUT', '/v1/a',
swob.HTTPAccepted, {}, None)
req = swob.Request.blank(
"/v1/a", environ={'REQUEST_METHOD': 'PUT'},
headers={"X-Object-Manifest": "/superbogus/?wrong=in&every=way"})
status, _, _ = self.call_dlo(req)
self.assertEqual(status, "202 Accepted")
class TestDloHeadManifest(DloTestCase):
def test_head_large_object(self):
expected_etag = '"%s"' % md5hex(
md5hex("aaaaa") + md5hex("bbbbb") + md5hex("ccccc") +
md5hex("ddddd") + md5hex("eeeee"))
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'HEAD'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(headers["Etag"], expected_etag)
self.assertEqual(headers["Content-Length"], "25")
def test_head_large_object_too_many_segments(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments',
environ={'REQUEST_METHOD': 'HEAD'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
# etag is manifest's etag
self.assertEqual(headers["Etag"], "etag-manyseg")
self.assertEqual(headers.get("Content-Length"), None)
def test_head_large_object_no_segments(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-no-segments',
environ={'REQUEST_METHOD': 'HEAD'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(headers["Etag"], '"%s"' % md5hex(""))
self.assertEqual(headers["Content-Length"], "0")
# one request to HEAD the manifest
# one request for the first page of listings
# *zero* requests for the second page of listings
self.assertEqual(
self.app.calls,
[('HEAD', '/v1/AUTH_test/mancon/manifest-no-segments'),
('GET', '/v1/AUTH_test/c?format=json&prefix=noseg_')])
class TestDloGetManifest(DloTestCase):
def test_get_manifest(self):
expected_etag = '"%s"' % md5hex(
md5hex("aaaaa") + md5hex("bbbbb") + md5hex("ccccc") +
md5hex("ddddd") + md5hex("eeeee"))
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(headers["Etag"], expected_etag)
self.assertEqual(headers["Content-Length"], "25")
self.assertEqual(body, 'aaaaabbbbbcccccdddddeeeee')
for _, _, hdrs in self.app.calls_with_headers[1:]:
ua = hdrs.get("User-Agent", "")
self.assertTrue("DLO MultipartGET" in ua)
self.assertFalse("DLO MultipartGET DLO MultipartGET" in ua)
# the first request goes through unaltered
self.assertFalse(
"DLO MultipartGET" in self.app.calls_with_headers[0][2])
# we set swift.source for everything but the first request
self.assertEqual(self.app.swift_sources,
[None, 'DLO', 'DLO', 'DLO', 'DLO', 'DLO', 'DLO'])
def test_get_non_manifest_passthrough(self):
req = swob.Request.blank('/v1/AUTH_test/c/catpicture.jpg',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
self.assertEqual(body, "meow meow meow meow")
def test_get_non_object_passthrough(self):
self.app.register('GET', '/info', swob.HTTPOk,
{}, 'useful stuff here')
req = swob.Request.blank('/info',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
self.assertEqual(status, '200 OK')
self.assertEqual(body, 'useful stuff here')
self.assertEqual(self.app.call_count, 1)
def test_get_manifest_passthrough(self):
# reregister it with the query param
self.app.register(
'GET', '/v1/AUTH_test/mancon/manifest?multipart-manifest=get',
swob.HTTPOk, {'Content-Length': '17', 'Etag': 'manifest-etag',
'X-Object-Manifest': 'c/seg'},
'manifest-contents')
req = swob.Request.blank(
'/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'multipart-manifest=get'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(headers["Etag"], "manifest-etag")
self.assertEqual(body, "manifest-contents")
def test_error_passthrough(self):
self.app.register(
'GET', '/v1/AUTH_test/gone/404ed',
swob.HTTPNotFound, {}, None)
req = swob.Request.blank('/v1/AUTH_test/gone/404ed',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
self.assertEqual(status, '404 Not Found')
def test_get_range(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=8-17'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "10")
self.assertEqual(body, "bbcccccddd")
expected_etag = '"%s"' % md5hex(
md5hex("aaaaa") + md5hex("bbbbb") + md5hex("ccccc") +
md5hex("ddddd") + md5hex("eeeee"))
self.assertEqual(headers.get("Etag"), expected_etag)
def test_get_range_on_segment_boundaries(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=10-19'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "10")
self.assertEqual(body, "cccccddddd")
def test_get_range_first_byte(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-0'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "1")
self.assertEqual(body, "a")
def test_get_range_last_byte(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=24-24'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "1")
self.assertEqual(body, "e")
def test_get_range_overlapping_end(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=18-30'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "7")
self.assertEqual(headers["Content-Range"], "bytes 18-24/25")
self.assertEqual(body, "ddeeeee")
def test_get_range_unsatisfiable(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=25-30'})
status, headers, body = self.call_dlo(req)
self.assertEqual(status, "416 Requested Range Not Satisfiable")
def test_get_range_many_segments_satisfiable(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=3-12'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "10")
# The /15 here indicates that this is a 15-byte object. DLO can't tell
# if there are more segments or not without fetching more container
# listings, though, so we just go with the sum of the lengths of the
# segments we can see. In an ideal world, this would be "bytes 3-12/*"
# to indicate that we don't know the full object length. However, RFC
# 2616 section 14.16 explicitly forbids us from doing that:
#
# A response with status code 206 (Partial Content) MUST NOT include
# a Content-Range field with a byte-range-resp-spec of "*".
#
# Since the truth is forbidden, we lie.
self.assertEqual(headers["Content-Range"], "bytes 3-12/15")
self.assertEqual(body, "aabbbbbccc")
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/mancon/manifest-many-segments'),
('GET', '/v1/AUTH_test/c?format=json&prefix=seg_'),
('GET', '/v1/AUTH_test/c/seg_01?multipart-manifest=get'),
('GET', '/v1/AUTH_test/c/seg_02?multipart-manifest=get'),
('GET', '/v1/AUTH_test/c/seg_03?multipart-manifest=get')])
def test_get_range_many_segments_satisfiability_unknown(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=10-22'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, "200 OK")
# this requires multiple pages of container listing, so we can't send
# a Content-Length header
self.assertEqual(headers.get("Content-Length"), None)
self.assertEqual(body, "aaaaabbbbbcccccdddddeeeee")
def test_get_suffix_range(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=-40'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "25")
self.assertEqual(body, "aaaaabbbbbcccccdddddeeeee")
def test_get_suffix_range_many_segments(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=-5'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, "200 OK")
self.assertEqual(headers.get("Content-Length"), None)
self.assertEqual(headers.get("Content-Range"), None)
self.assertEqual(body, "aaaaabbbbbcccccdddddeeeee")
def test_get_multi_range(self):
# DLO doesn't support multi-range GETs. The way that you express that
# in HTTP is to return a 200 response containing the whole entity.
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=5-9,15-19'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, "200 OK")
self.assertEqual(headers.get("Content-Length"), None)
self.assertEqual(headers.get("Content-Range"), None)
self.assertEqual(body, "aaaaabbbbbcccccdddddeeeee")
def test_if_match_matches(self):
manifest_etag = '"%s"' % md5hex(
md5hex("aaaaa") + md5hex("bbbbb") + md5hex("ccccc") +
md5hex("ddddd") + md5hex("eeeee"))
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': manifest_etag})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '25')
self.assertEqual(body, 'aaaaabbbbbcccccdddddeeeee')
def test_if_match_does_not_match(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': 'not it'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '412 Precondition Failed')
self.assertEqual(headers['Content-Length'], '0')
self.assertEqual(body, '')
def test_if_none_match_matches(self):
manifest_etag = '"%s"' % md5hex(
md5hex("aaaaa") + md5hex("bbbbb") + md5hex("ccccc") +
md5hex("ddddd") + md5hex("eeeee"))
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': manifest_etag})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '304 Not Modified')
self.assertEqual(headers['Content-Length'], '0')
self.assertEqual(body, '')
def test_if_none_match_does_not_match(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': 'not it'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '25')
self.assertEqual(body, 'aaaaabbbbbcccccdddddeeeee')
def test_get_with_if_modified_since(self):
# It's important not to pass the If-[Un]Modified-Since header to the
# proxy for segment GET requests, as it may result in 304 Not Modified
# responses, and those don't contain segment data.
req = swob.Request.blank(
'/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': 'Wed, 12 Feb 2014 22:24:52 GMT',
'If-Unmodified-Since': 'Thu, 13 Feb 2014 23:25:53 GMT'})
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
for _, _, hdrs in self.app.calls_with_headers[1:]:
self.assertFalse('If-Modified-Since' in hdrs)
self.assertFalse('If-Unmodified-Since' in hdrs)
def test_error_fetching_first_segment(self):
self.app.register(
'GET', '/v1/AUTH_test/c/seg_01',
swob.HTTPForbidden, {}, None)
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
headers = swob.HeaderKeyDict(headers)
self.assertTrue(isinstance(exc, exceptions.SegmentError))
self.assertEqual(status, "200 OK")
self.assertEqual(body, '') # error right away -> no body bytes sent
def test_error_fetching_second_segment(self):
self.app.register(
'GET', '/v1/AUTH_test/c/seg_02',
swob.HTTPForbidden, {}, None)
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
headers = swob.HeaderKeyDict(headers)
self.assertTrue(isinstance(exc, exceptions.SegmentError))
self.assertEqual(status, "200 OK")
self.assertEqual(''.join(body), "aaaaa") # first segment made it out
def test_error_listing_container_first_listing_request(self):
self.app.register(
'GET', '/v1/AUTH_test/c?format=json&prefix=seg_',
swob.HTTPNotFound, {}, None)
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=-5'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
self.assertEqual(status, "404 Not Found")
def test_error_listing_container_second_listing_request(self):
self.app.register(
'GET', '/v1/AUTH_test/c?format=json&prefix=seg_&marker=seg_03',
swob.HTTPNotFound, {}, None)
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=-5'})
with mock.patch(LIMIT, 3):
status, headers, body, exc = self.call_dlo(
req, expect_exception=True)
self.assertTrue(isinstance(exc, exceptions.ListingIterError))
self.assertEqual(status, "200 OK")
self.assertEqual(body, "aaaaabbbbbccccc")
def test_mismatched_etag_fetching_second_segment(self):
self.app.register(
'GET', '/v1/AUTH_test/c/seg_02',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("bbbbb")},
'bbWRONGbb')
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
headers = swob.HeaderKeyDict(headers)
self.assertTrue(isinstance(exc, exceptions.SegmentError))
self.assertEqual(status, "200 OK")
self.assertEqual(''.join(body), "aaaaabbWRONGbb") # stop after error
def test_etag_comparison_ignores_quotes(self):
# a little future-proofing here in case we ever fix this
self.app.register(
'HEAD', '/v1/AUTH_test/mani/festo',
swob.HTTPOk, {'Content-Length': '0', 'Etag': 'blah',
'X-Object-Manifest': 'c/quotetags'}, None)
self.app.register(
'GET', '/v1/AUTH_test/c?format=json&prefix=quotetags',
swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'},
json.dumps([{"hash": "\"abc\"", "bytes": 5, "name": "quotetags1",
"last_modified": "2013-11-22T02:42:14.261620",
"content-type": "application/octet-stream"},
{"hash": "def", "bytes": 5, "name": "quotetags2",
"last_modified": "2013-11-22T02:42:14.261620",
"content-type": "application/octet-stream"}]))
req = swob.Request.blank('/v1/AUTH_test/mani/festo',
environ={'REQUEST_METHOD': 'HEAD'})
status, headers, body = self.call_dlo(req)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(headers["Etag"],
'"' + hashlib.md5("abcdef").hexdigest() + '"')
def test_object_prefix_quoting(self):
self.app.register(
'GET', '/v1/AUTH_test/man/accent',
swob.HTTPOk, {'Content-Length': '0', 'Etag': 'blah',
'X-Object-Manifest': u'c/é'.encode('utf-8')}, None)
segs = [{"hash": md5hex("AAAAA"), "bytes": 5, "name": u"é1"},
{"hash": md5hex("AAAAA"), "bytes": 5, "name": u"é2"}]
self.app.register(
'GET', '/v1/AUTH_test/c?format=json&prefix=%C3%A9',
swob.HTTPOk, {'Content-Type': 'application/json'},
json.dumps(segs))
self.app.register(
'GET', '/v1/AUTH_test/c/\xC3\xa91',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("AAAAA")},
"AAAAA")
self.app.register(
'GET', '/v1/AUTH_test/c/\xC3\xA92',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("BBBBB")},
"BBBBB")
req = swob.Request.blank('/v1/AUTH_test/man/accent',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
self.assertEqual(status, "200 OK")
self.assertEqual(body, "AAAAABBBBB")
def test_get_taking_too_long(self):
the_time = [time.time()]
def mock_time():
return the_time[0]
# this is just a convenient place to hang a time jump
def mock_is_success(status_int):
the_time[0] += 9 * 3600
return status_int // 100 == 2
req = swob.Request.blank(
'/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
with contextlib.nested(
mock.patch('swift.common.request_helpers.time.time',
mock_time),
mock.patch('swift.common.request_helpers.is_success',
mock_is_success),
mock.patch.object(dlo, 'is_success', mock_is_success)):
status, headers, body, exc = self.call_dlo(
req, expect_exception=True)
self.assertEqual(status, '200 OK')
self.assertEqual(body, 'aaaaabbbbbccccc')
self.assertTrue(isinstance(exc, exceptions.SegmentError))
def test_get_oversize_segment(self):
# If we send a Content-Length header to the client, it's based on the
# container listing. If a segment gets bigger by the time we get to it
# (like if a client uploads a bigger segment w/the same name), we need
# to not send anything beyond the length we promised. Also, we should
# probably raise an exception.
# This is now longer than the original seg_03+seg_04+seg_05 combined
self.app.register(
'GET', '/v1/AUTH_test/c/seg_03',
swob.HTTPOk, {'Content-Length': '20', 'Etag': 'seg03-etag'},
'cccccccccccccccccccc')
req = swob.Request.blank(
'/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '200 OK') # sanity check
self.assertEqual(headers.get('Content-Length'), '25') # sanity check
self.assertEqual(body, 'aaaaabbbbbccccccccccccccc')
self.assertTrue(isinstance(exc, exceptions.SegmentError))
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/mancon/manifest'),
('GET', '/v1/AUTH_test/c?format=json&prefix=seg'),
('GET', '/v1/AUTH_test/c/seg_01?multipart-manifest=get'),
('GET', '/v1/AUTH_test/c/seg_02?multipart-manifest=get'),
('GET', '/v1/AUTH_test/c/seg_03?multipart-manifest=get')])
def test_get_undersize_segment(self):
# If we send a Content-Length header to the client, it's based on the
# container listing. If a segment gets smaller by the time we get to
# it (like if a client uploads a smaller segment w/the same name), we
# need to raise an exception so that the connection will be closed by
# the WSGI server. Otherwise, the WSGI server will be waiting for the
# next request, the client will still be waiting for the rest of the
# response, and nobody will be happy.
# Shrink it by a single byte
self.app.register(
'GET', '/v1/AUTH_test/c/seg_03',
swob.HTTPOk, {'Content-Length': '4', 'Etag': md5hex("cccc")},
'cccc')
req = swob.Request.blank(
'/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '200 OK') # sanity check
self.assertEqual(headers.get('Content-Length'), '25') # sanity check
self.assertEqual(body, 'aaaaabbbbbccccdddddeeeee')
self.assertTrue(isinstance(exc, exceptions.SegmentError))
def test_get_undersize_segment_range(self):
# Shrink it by a single byte
self.app.register(
'GET', '/v1/AUTH_test/c/seg_03',
swob.HTTPOk, {'Content-Length': '4', 'Etag': md5hex("cccc")},
'cccc')
req = swob.Request.blank(
'/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-14'})
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
headers = swob.HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content') # sanity check
self.assertEqual(headers.get('Content-Length'), '15') # sanity check
self.assertEqual(body, 'aaaaabbbbbcccc')
self.assertTrue(isinstance(exc, exceptions.SegmentError))
def test_get_with_auth_overridden(self):
auth_got_called = [0]
def my_auth():
auth_got_called[0] += 1
return None
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET',
'swift.authorize': my_auth})
status, headers, body = self.call_dlo(req)
self.assertTrue(auth_got_called[0] > 1)
def fake_start_response(*args, **kwargs):
pass
class TestDloCopyHook(DloTestCase):
def setUp(self):
super(TestDloCopyHook, self).setUp()
self.app.register(
'GET', '/v1/AUTH_test/c/o1', swob.HTTPOk,
{'Content-Length': '10', 'Etag': 'o1-etag'},
"aaaaaaaaaa")
self.app.register(
'GET', '/v1/AUTH_test/c/o2', swob.HTTPOk,
{'Content-Length': '10', 'Etag': 'o2-etag'},
"bbbbbbbbbb")
self.app.register(
'GET', '/v1/AUTH_test/c/man',
swob.HTTPOk, {'X-Object-Manifest': 'c/o'},
"manifest-contents")
lm = '2013-11-22T02:42:13.781760'
ct = 'application/octet-stream'
segs = [{"hash": "o1-etag", "bytes": 10, "name": "o1",
"last_modified": lm, "content_type": ct},
{"hash": "o2-etag", "bytes": 5, "name": "o2",
"last_modified": lm, "content_type": ct}]
self.app.register(
'GET', '/v1/AUTH_test/c?format=json&prefix=o',
swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'},
json.dumps(segs))
copy_hook = [None]
# slip this guy in there to pull out the hook
def extract_copy_hook(env, sr):
copy_hook[0] = env.get('swift.copy_hook')
return self.app(env, sr)
self.dlo = dlo.filter_factory({})(extract_copy_hook)
req = swob.Request.blank('/v1/AUTH_test/c/o1',
environ={'REQUEST_METHOD': 'GET'})
self.dlo(req.environ, fake_start_response)
self.copy_hook = copy_hook[0]
self.assertTrue(self.copy_hook is not None) # sanity check
def test_copy_hook_passthrough(self):
source_req = swob.Request.blank(
'/v1/AUTH_test/c/man',
environ={'REQUEST_METHOD': 'GET'})
sink_req = swob.Request.blank(
'/v1/AUTH_test/c/man',
environ={'REQUEST_METHOD': 'PUT'})
source_resp = swob.Response(request=source_req, status=200)
# no X-Object-Manifest header, so do nothing
modified_resp = self.copy_hook(source_req, source_resp, sink_req)
self.assertTrue(modified_resp is source_resp)
def test_copy_hook_manifest(self):
source_req = swob.Request.blank(
'/v1/AUTH_test/c/man',
environ={'REQUEST_METHOD': 'GET'})
sink_req = swob.Request.blank(
'/v1/AUTH_test/c/man',
environ={'REQUEST_METHOD': 'PUT'})
source_resp = swob.Response(
request=source_req, status=200,
headers={"X-Object-Manifest": "c/o"},
app_iter=["manifest"])
# it's a manifest, so copy the segments to make a normal object
modified_resp = self.copy_hook(source_req, source_resp, sink_req)
self.assertTrue(modified_resp is not source_resp)
self.assertEqual(modified_resp.etag,
hashlib.md5("o1-etago2-etag").hexdigest())
self.assertEqual(sink_req.headers.get('X-Object-Manifest'), None)
def test_copy_hook_manifest_with_multipart_manifest_get(self):
source_req = swob.Request.blank(
'/v1/AUTH_test/c/man',
environ={'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'multipart-manifest=get'})
sink_req = swob.Request.blank(
'/v1/AUTH_test/c/man',
environ={'REQUEST_METHOD': 'PUT'})
source_resp = swob.Response(
request=source_req, status=200,
headers={"X-Object-Manifest": "c/o"},
app_iter=["manifest"])
# make sure the sink request (the backend PUT) gets X-Object-Manifest
# on it, but that's all
modified_resp = self.copy_hook(source_req, source_resp, sink_req)
self.assertTrue(modified_resp is source_resp)
self.assertEqual(sink_req.headers.get('X-Object-Manifest'), 'c/o')
class TestDloConfiguration(unittest.TestCase):
"""
For backwards compatibility, we will read a couple of values out of the
proxy's config section if we don't have any config values.
"""
def test_skip_defaults_if_configured(self):
# The presence of even one config value in our config section means we
# won't go looking for the proxy config at all.
proxy_conf = dedent("""
[DEFAULT]
bind_ip = 10.4.5.6
[pipeline:main]
pipeline = catch_errors dlo ye-olde-proxy-server
[filter:dlo]
use = egg:swift#dlo
max_get_time = 3600
[app:ye-olde-proxy-server]
use = egg:swift#proxy
rate_limit_segments_per_sec = 7
rate_limit_after_segment = 13
max_get_time = 2900
""")
conffile = tempfile.NamedTemporaryFile()
conffile.write(proxy_conf)
conffile.flush()
mware = dlo.filter_factory({
'max_get_time': '3600',
'__file__': conffile.name
})("no app here")
self.assertEqual(1, mware.rate_limit_segments_per_sec)
self.assertEqual(10, mware.rate_limit_after_segment)
self.assertEqual(3600, mware.max_get_time)
def test_finding_defaults_from_file(self):
# If DLO has no config vars, go pull them from the proxy server's
# config section
proxy_conf = dedent("""
[DEFAULT]
bind_ip = 10.4.5.6
[pipeline:main]
pipeline = catch_errors dlo ye-olde-proxy-server
[filter:dlo]
use = egg:swift#dlo
[app:ye-olde-proxy-server]
use = egg:swift#proxy
rate_limit_after_segment = 13
max_get_time = 2900
""")
conffile = tempfile.NamedTemporaryFile()
conffile.write(proxy_conf)
conffile.flush()
mware = dlo.filter_factory({
'__file__': conffile.name
})("no app here")
self.assertEqual(1, mware.rate_limit_segments_per_sec)
self.assertEqual(13, mware.rate_limit_after_segment)
self.assertEqual(2900, mware.max_get_time)
def test_finding_defaults_from_dir(self):
# If DLO has no config vars, go pull them from the proxy server's
# config section
proxy_conf1 = dedent("""
[DEFAULT]
bind_ip = 10.4.5.6
[pipeline:main]
pipeline = catch_errors dlo ye-olde-proxy-server
""")
proxy_conf2 = dedent("""
[filter:dlo]
use = egg:swift#dlo
[app:ye-olde-proxy-server]
use = egg:swift#proxy
rate_limit_after_segment = 13
max_get_time = 2900
""")
conf_dir = tempfile.mkdtemp()
conffile1 = tempfile.NamedTemporaryFile(dir=conf_dir, suffix='.conf')
conffile1.write(proxy_conf1)
conffile1.flush()
conffile2 = tempfile.NamedTemporaryFile(dir=conf_dir, suffix='.conf')
conffile2.write(proxy_conf2)
conffile2.flush()
mware = dlo.filter_factory({
'__file__': conf_dir
})("no app here")
self.assertEqual(1, mware.rate_limit_segments_per_sec)
self.assertEqual(13, mware.rate_limit_after_segment)
self.assertEqual(2900, mware.max_get_time)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -5,741,970,016,733,686,000 | 41.293942 | 79 | 0.562573 | false |
nlhepler/freetype-py3 | examples/glyph-outline.py | 1 | 1311 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Glyph outline rendering
'''
from freetype import *
if __name__ == '__main__':
import numpy
import matplotlib.pyplot as plt
face = Face(b'./Vera.ttf')
face.set_char_size( 4*48*64 )
flags = FT_LOAD_DEFAULT | FT_LOAD_NO_BITMAP
face.load_char('S', flags )
slot = face.glyph
glyph = slot.get_glyph()
stroker = Stroker( )
stroker.set(64, FT_STROKER_LINECAP_ROUND, FT_STROKER_LINEJOIN_ROUND, 0 )
glyph.stroke( stroker )
blyph = glyph.to_bitmap(FT_RENDER_MODE_NORMAL, Vector(0,0))
bitmap = blyph.bitmap
width, rows, pitch = bitmap.width, bitmap.rows, bitmap.pitch
top, left = blyph.top, blyph.left
data = []
for i in range(rows):
data.extend(bitmap.buffer[i*pitch:i*pitch+width])
Z = numpy.array(data,dtype=numpy.ubyte).reshape(rows, width)
plt.figure(figsize=(6,8))
plt.imshow(Z, interpolation='nearest', cmap=plt.cm.gray_r)
plt.savefig('test.pdf', format='pdf')
plt.show()
| bsd-3-clause | -7,109,555,045,368,138,000 | 31.775 | 79 | 0.56598 | false |
HybridF5/jacket | jacket/tests/compute/unit/fake_notifier.py | 1 | 3739 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from jacket import rpc
NOTIFICATIONS = []
VERSIONED_NOTIFICATIONS = []
def reset():
del NOTIFICATIONS[:]
del VERSIONED_NOTIFICATIONS[:]
FakeMessage = collections.namedtuple('Message',
['publisher_id', 'priority',
'event_type', 'payload', 'context'])
class FakeNotifier(object):
def __init__(self, transport, publisher_id, serializer=None):
self.transport = transport
self.publisher_id = publisher_id
self._serializer = serializer or messaging.serializer.NoOpSerializer()
for priority in ['debug', 'info', 'warn', 'error', 'critical']:
setattr(self, priority,
functools.partial(self._notify, priority.upper()))
def prepare(self, publisher_id=None):
if publisher_id is None:
publisher_id = self.publisher_id
return self.__class__(self.transport, publisher_id,
serializer=self._serializer)
def _notify(self, priority, ctxt, event_type, payload):
payload = self._serializer.serialize_entity(ctxt, payload)
# NOTE(sileht): simulate the kombu serializer
        # this permits an exception to be raised if something has not
        # been serialized correctly
jsonutils.to_primitive(payload)
# NOTE(melwitt): Try to serialize the context, as the rpc would.
# An exception will be raised if something is wrong
# with the context.
self._serializer.serialize_context(ctxt)
msg = FakeMessage(self.publisher_id, priority, event_type,
payload, ctxt)
NOTIFICATIONS.append(msg)
class FakeVersionedNotifier(FakeNotifier):
def _notify(self, priority, ctxt, event_type, payload):
payload = self._serializer.serialize_entity(ctxt, payload)
VERSIONED_NOTIFICATIONS.append({'publisher_id': self.publisher_id,
'priority': priority,
'event_type': event_type,
'payload': payload})
def stub_notifier(stubs):
stubs.Set(messaging, 'Notifier', FakeNotifier)
if rpc.LEGACY_NOTIFIER and rpc.NOTIFIER:
stubs.Set(rpc, 'LEGACY_NOTIFIER',
FakeNotifier(rpc.LEGACY_NOTIFIER.transport,
rpc.LEGACY_NOTIFIER.publisher_id,
serializer=getattr(rpc.LEGACY_NOTIFIER,
'_serializer',
None)))
stubs.Set(rpc, 'NOTIFIER',
FakeVersionedNotifier(rpc.NOTIFIER.transport,
rpc.NOTIFIER.publisher_id,
serializer=getattr(rpc.NOTIFIER,
'_serializer',
None)))
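# Illustrative, self-contained sketch (not part of the original module) showing
# how the fake records notifications; the publisher id and payload below are
# hypothetical values chosen only for the example.
def _example_fake_notifier_usage():
    reset()
    notifier = FakeNotifier(transport=None, publisher_id='compute.fake-host')
    notifier.info({}, 'instance.create', {'instance_id': 'fake-id'})
    # Every emitted notification is captured as a FakeMessage in NOTIFICATIONS.
    return NOTIFICATIONS[-1].event_type  # 'instance.create'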
| apache-2.0 | 6,867,932,777,672,015,000 | 39.641304 | 78 | 0.57395 | false |
cernops/keystone | keystone/policy/backends/rules.py | 1 | 2753 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy engine for keystone."""
from oslo_log import log
from oslo_policy import policy as common_policy
import keystone.conf
from keystone import exception
from keystone.policy.backends import base
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
_ENFORCER = None
def reset():
global _ENFORCER
_ENFORCER = None
def init():
global _ENFORCER
if not _ENFORCER:
_ENFORCER = common_policy.Enforcer(CONF)
def enforce(credentials, action, target, do_raise=True):
"""Verify that the action is valid on the target in this context.
:param credentials: user credentials
:param action: string representing the action to be checked, which should
be colon separated for clarity.
:param target: dictionary representing the object of the action for object
creation this should be a dictionary representing the
location of the object e.g. {'project_id':
object.project_id}
:raises keystone.exception.Forbidden: If verification fails.
Actions should be colon separated for clarity. For example:
* identity:list_users
"""
init()
# Add the exception arguments if asked to do a raise
extra = {}
if do_raise:
extra.update(exc=exception.ForbiddenAction, action=action,
do_raise=do_raise)
return _ENFORCER.enforce(action, target, credentials, **extra)
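# Illustrative sketch (not part of the original module) of how a caller would
# typically invoke ``enforce``; the credential and target values below are
# hypothetical.
def _example_enforce_usage():
    creds = {'user_id': 'fake-user-id', 'roles': ['admin']}
    target = {'user_id': 'fake-user-id'}
    # Raises keystone.exception.ForbiddenAction if the policy denies the call.
    enforce(creds, 'identity:list_users', target)
    # With do_raise=False a boolean verdict is returned instead of raising.
    return enforce(creds, 'identity:list_users', target, do_raise=False)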
class Policy(base.PolicyDriverV8):
def enforce(self, credentials, action, target):
LOG.debug('enforce %(action)s: %(credentials)s', {
'action': action,
'credentials': credentials})
enforce(credentials, action, target)
def create_policy(self, policy_id, policy):
raise exception.NotImplemented()
def list_policies(self):
raise exception.NotImplemented()
def get_policy(self, policy_id):
raise exception.NotImplemented()
def update_policy(self, policy_id, policy):
raise exception.NotImplemented()
def delete_policy(self, policy_id):
raise exception.NotImplemented()
| apache-2.0 | -5,975,855,063,058,688,000 | 28.923913 | 78 | 0.680712 | false |
s910324/Sloth | bokehPlotter/bokehLine.py | 1 | 1301 |
class bokehLine(object):
    """Thin wrapper around a Bokeh line glyph that keeps the glyph, its
    marker symbol and its view number together and exposes the current
    settings as a single dict via ``line_val``."""
def __init__(self, line, symbol = None, viewNum = None, parent = None):
self.line = line
self.symbol = symbol
self.viewNum = viewNum
self.style = None
self.val = {'name' : self.line.name,
'color' : self.line.line_color,
'width' : self.line.line_width,
'style' : None,
'symbol' : self.symbol,
'visible' : self.line.visible,
'viewNum' : self.viewNum}
def line_val(self, name = None, color = None, width = None,
style = None, symbol = None, visible = None, viewNum = None):
if name is not None:
self.line.name = name
if color:
self.line.line_color = color
if width is not None:
self.line.line_width = width
if style:
self.style = style
if symbol:
self.symbol = symbol
if visible is not None:
self.line.visible = visible
if viewNum is not None:
self.viewNum = viewNum
self.val.update({'name' : self.line.name})
self.val.update({'color' : self.line.line_color})
self.val.update({'width' : self.line.line_width})
self.val.update({'style' : self.style})
self.val.update({'symbol' : self.symbol})
self.val.update({'visible' : self.line.visible})
self.val.update({'viewNum' : self.viewNum})
return self.val
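# Illustrative usage (not part of the original module). The wrapped object is
# assumed to expose ``name``, ``line_color``, ``line_width`` and ``visible``,
# as a Bokeh line glyph does; a minimal stand-in object is used here so the
# sketch runs without Bokeh installed.
if __name__ == '__main__':
    class _FakeGlyph(object):
        name = 'trace'
        line_color = 'blue'
        line_width = 1
        visible = True
    wrapped = bokehLine(_FakeGlyph(), symbol='circle', viewNum=0)
    print(wrapped.line_val(color='red', width=2, style='dashed'))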
| lgpl-3.0 | -8,097,567,311,734,556,000 | 30.731707 | 72 | 0.607994 | false |
pbrisk/dcf | test/unittests/curve_tests.py | 1 | 5566 | # -*- coding: utf-8 -*-
# dcf
# ---
# A Python library for generating discounted cashflows.
#
# Author: sonntagsgesicht, based on a fork of Deutsche Postbank [pbrisk]
# Version: 0.4, copyright Saturday, 10 October 2020
# Website: https://github.com/sonntagsgesicht/dcf
# License: Apache License 2.0 (see LICENSE file)
import sys
import os
from unittest import TestCase
from math import floor
from businessdate import BusinessDate, BusinessRange
from scipy.interpolate import interp1d
from dcf.interpolation import linear, constant
from dcf import Curve, DateCurve, RateCurve, dyn_scheme
def _silent(func, *args, **kwargs):
_stout = sys.stdout
sys.stdout = open(os.devnull, 'w')
_res = func(*args, **kwargs)
sys.stdout.close()
sys.stdout = _stout
return _res
class CurveUnitTests(TestCase):
def setUp(self):
self.x_list = [float(x) * 0.01 for x in range(10)]
self.y_list = list(self.x_list)
self.interpolation = dyn_scheme(constant, linear, constant)
self.curve = Curve(self.x_list, self.y_list, self.interpolation)
self.x_test = [float(x) * 0.005 for x in range(-10, 30)]
def test_algebra(self):
other = Curve(self.x_list, self.y_list)
new = self.curve + other
for x in new.domain:
self.assertAlmostEqual(new(x), self.curve(x) * 2.)
new = self.curve - other
for x in new.domain:
self.assertAlmostEqual(new(x), 0.)
new = self.curve * other
for x in new.domain:
self.assertAlmostEqual(new(x), self.curve(x) ** 2)
self.assertRaises(ZeroDivisionError, self.curve.__div__, other)
new = self.curve / Curve(self.x_list, [0.1] * len(self.x_list))
for x in new.domain:
self.assertAlmostEqual(new(x), self.curve(x) / 0.1)
def test_init(self):
self.assertEqual(str(Curve()), 'Curve()')
self.assertEqual(str(DateCurve()), 'DateCurve()')
self.assertEqual(str(RateCurve()), 'RateCurve()')
def test_interpolation(self):
# test default interpolation scheme
for x in self.x_test:
f = (lambda t: max(.0, min(t, .09)))
self.assertAlmostEqual(f(x), self.curve(x))
ccc = dyn_scheme(constant, constant, constant)
curve = Curve(self.x_list, self.y_list, ccc)
constant_curve = Curve(self.x_list, self.y_list, constant)
for x in self.x_test:
f = lambda t: max(.0, min(floor(t / .01) * .01, .09))
self.assertAlmostEqual(f(x), curve(x))
self.assertAlmostEqual(constant_curve(x), curve(x))
lll = dyn_scheme(linear, linear, linear)
curve = Curve(self.x_list, self.y_list, lll)
linear_curve = Curve(self.x_list, self.y_list, linear)
for x in self.x_test:
f = lambda t: t
self.assertAlmostEqual(f(x), curve(x))
self.assertAlmostEqual(linear_curve(x), curve(x))
dcf_curve = Curve(self.x_list, self.y_list, dyn_scheme(constant, linear, constant))
scipy_linear = lambda x, y: interp1d(x, y, kind="linear")
scipy_curve = Curve(self.x_list, self.y_list, dyn_scheme(constant, scipy_linear, constant))
for x in self.x_test:
self.assertAlmostEqual(scipy_curve(x), dcf_curve(x))
dcf_curve = Curve(self.x_list, self.y_list, dyn_scheme(linear, linear, linear))
scipy_scheme = lambda x, y: \
interp1d(x, y, kind="linear", fill_value="extrapolate")
scipy_curve = Curve(self.x_list, self.y_list, scipy_scheme)
for x in self.x_test:
self.assertAlmostEqual(scipy_curve(x), dcf_curve(x))
dcf_curve = Curve(self.x_list, self.y_list, dyn_scheme(constant, linear, constant))
scipy_scheme = lambda x, y: \
interp1d(x, y, kind="linear", bounds_error=False, fill_value=(self.y_list[0], self.y_list[-1]))
scipy_curve = Curve(self.x_list, self.y_list, scipy_scheme)
for x in self.x_test:
self.assertAlmostEqual(scipy_curve(x), dcf_curve(x))
class DateCurveUnitTests(TestCase):
def setUp(self):
self.dates = BusinessRange(BusinessDate(), BusinessDate() + '10Y', '1Y')
self.values = [0.01 * n ** 4 - 2 * n ** 2 for n in range(0, len(self.dates))]
self.curve = DateCurve(self.dates, self.values)
def test_dates(self):
for d in self.dates:
self.assertTrue(d in self.curve.domain)
d = BusinessDate() + '3M'
def test_shift_origin(self):
origin1 = BusinessDate()
origin2 = BusinessDate() + "3m2d"
Curve1 = DateCurve(self.dates, self.values, origin=origin1)
Curve2 = DateCurve(self.dates, self.values, origin=origin2)
for d in self.dates:
self.assertAlmostEqual(Curve1(d), Curve2(d))
def test_fixings(self):
curve = DateCurve(self.dates, self.values)
date = BusinessDate() + '1y3m4d'
value = curve(date)
previous = curve(date - '1d')
next = curve(date + '1d')
curve.fixings[date] = value * 2
self.assertAlmostEqual(curve.fixings[date], curve(date))
self.assertAlmostEqual(value * 2, curve(date))
self.assertAlmostEqual(previous, curve(date - '1d'))
self.assertAlmostEqual(next, curve(date + '1d'))
def test_cast(self):
date_curve = DateCurve(self.dates, self.values)
curve = Curve(date_curve)
for x, d in zip(curve.domain, date_curve.domain):
self.assertAlmostEqual(curve(x), date_curve(d))
| apache-2.0 | 6,485,428,454,609,017,000 | 36.863946 | 107 | 0.614625 | false |
dwcoder/diceware | tests/test_config.py | 1 | 5149 | import os
from diceware.config import (
OPTIONS_DEFAULTS, valid_locations, get_configparser, get_config_dict,
configparser,
)
class TestConfigModule(object):
# tests for diceware.config
def test_defaults(self):
# there is a set of defaults for options available
assert OPTIONS_DEFAULTS is not None
def test_valid_locations(self, home_dir):
# we look for config files in user home and local dir
assert valid_locations() == [
str(home_dir / ".diceware.ini")
]
def test_get_configparser(self, tmpdir):
# we can parse simple configs
conf_path = tmpdir / "sample.ini"
conf_path.write("\n".join(["[diceware]", "num=1", ""]))
found, config = get_configparser([str(conf_path), ])
assert found == [str(conf_path)]
def test_get_configparser_empty_list(self):
# we cope with empty config file lists
found, config = get_configparser([])
assert found == []
def test_get_configparser_no_list(self, home_dir):
# we cope with no list at all
found, config = get_configparser()
assert found == []
def test_get_configparser_default_path(self, home_dir):
# a config file in $HOME is looked up by default
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[diceware]", "num = 3", ""]))
found, config = get_configparser()
assert found == [str(config_file)]
def test_get_config_dict_no_config_file(self, home_dir):
# we get config values even without a config file.
conf_dict = get_config_dict()
assert conf_dict == OPTIONS_DEFAULTS
def test_get_config_dict_no_diceware_section(self, home_dir):
# we cope with config files, if they do not contain a diceware config
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[not-diceware]", "num = 3", ""]))
conf_dict = get_config_dict()
assert conf_dict == OPTIONS_DEFAULTS
def test_get_config_dict(self, home_dir):
# we can get config values from files as a dict.
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[diceware]", "num = 3", ""]))
conf_dict = get_config_dict()
assert len(conf_dict) == len(OPTIONS_DEFAULTS)
assert conf_dict != OPTIONS_DEFAULTS
def test_get_config_dict_int(self, home_dir):
# integer values are interpolated correctly
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[diceware]", "num=3", ""]))
conf_dict = get_config_dict()
assert "num" in conf_dict.keys()
assert conf_dict["num"] == 3
def test_get_config_dict_bool(self, home_dir):
# boolean values are interpolated correctly
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[diceware]", "caps = Off", ""]))
conf_dict = get_config_dict()
assert "caps" in conf_dict.keys()
assert conf_dict["caps"] is False
config_file.write("\n".join(["[diceware]", "caps = On", ""]))
assert get_config_dict()["caps"] is True
def test_get_config_dict_ignore_irrelevant(self, home_dir):
# values that have no default are ignored
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[diceware]", "foo = bar", ""]))
conf_dict = get_config_dict()
assert "foo" not in conf_dict.keys()
def test_get_config_dict_string(self, home_dir):
# string values are interpolated correctly
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[diceware]", "delimiter=!", ""]))
conf_dict = get_config_dict()
assert conf_dict["delimiter"] == "!"
def test_get_config_dict_string_empty(self, home_dir):
# we can set empty string values
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[diceware]", "delimiter=", ""]))
conf_dict = get_config_dict()
assert conf_dict["delimiter"] == ""
class TestSampleIni(object):
# test local sample ini file
def test_complete_options_set(self, home_dir):
# make sure the set of options in sample file is complete
sample_path = os.path.join(
os.path.dirname(__file__), 'sample_dot_diceware.ini')
parser = configparser.SafeConfigParser()
found = parser.read([sample_path, ])
assert sample_path in found
assert parser.has_section('diceware')
for key, val in OPTIONS_DEFAULTS.items():
# make sure option keywords are contained.
assert parser.has_option('diceware', key)
def test_no_invalid_options(self, home_dir):
# ensure we have no obsolete/unused options in sample
sample_path = os.path.join(
os.path.dirname(__file__), 'sample_dot_diceware.ini')
parser = configparser.SafeConfigParser()
parser.read([sample_path, ])
for option in parser.options('diceware'):
assert option in OPTIONS_DEFAULTS.keys()
| gpl-3.0 | -8,182,861,869,564,112,000 | 39.543307 | 77 | 0.606331 | false |
SalesforceFoundation/mrbelvedereci | metaci/testresults/migrations/0009_auto_20181207_2010.py | 1 | 1093 | # Generated by Django 2.1.3 on 2018-12-07 20:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("testresults", "0008_merge_20180911_1915"),
]
operations = [
migrations.AlterField(
model_name="testclass",
name="test_type",
field=models.CharField(
choices=[
("Apex", "Apex"),
("JUnit", "JUnit"),
("Robot", "Robot"),
("Other", "Other"),
],
db_index=True,
max_length=32,
),
),
migrations.AlterField(
model_name="testresult",
name="outcome",
field=models.CharField(
choices=[
("Pass", "Pass"),
("CompileFail", "CompileFail"),
("Fail", "Fail"),
("Skip", "Skip"),
],
db_index=True,
max_length=16,
),
),
]
| bsd-3-clause | -2,158,632,882,686,530,300 | 25.658537 | 52 | 0.399817 | false |
demonchild2112/travis-test | grr/server/grr_response_server/gui/selenium_tests/report_test.py | 1 | 4596 | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from selenium.webdriver.common import keys
from grr_response_core.lib import rdfvalue
from grr_response_server import data_store
from grr_response_server.gui import gui_test_lib
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import test_lib
def AddFakeAuditLog(user=None, router_method_name=None):
data_store.REL_DB.WriteAPIAuditEntry(
rdf_objects.APIAuditEntry(
username=user,
router_method_name=router_method_name,
))
class TestReports(gui_test_lib.GRRSeleniumTest):
"""Test the reports interface."""
def testReports(self):
"""Test the reports interface."""
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/14")):
AddFakeAuditLog(user="User123")
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/22")):
AddFakeAuditLog(user="User456")
# Make "test" user an admin.
self.CreateAdminUser(u"test")
self.Open("/#/stats/")
# Go to reports.
self.Click("css=#MostActiveUsersReportPlugin_anchor i.jstree-icon")
self.WaitUntil(self.IsTextPresent, "Server | User Breakdown")
# Enter a timerange that only matches one of the two fake events.
self.Type("css=grr-form-datetime input", "2012-12-21 12:34")
self.Click("css=button:contains('Show report')")
self.WaitUntil(self.IsTextPresent, "User456")
self.assertFalse(self.IsTextPresent("User123"))
def testReportsDontIncludeTimerangesInUrlsOfReportsThatDontUseThem(self):
client_id = self.SetupClient(0)
self.AddClientLabel(client_id, u"owner", u"bar")
self.Open("/#/stats/")
# Go to reports.
self.Click("css=#MostActiveUsersReportPlugin_anchor i.jstree-icon")
self.WaitUntil(self.IsTextPresent, "Server | User Breakdown")
# Default values aren't shown in the url.
self.WaitUntilNot(lambda: "start_time" in self.GetCurrentUrlPath())
self.assertNotIn("duration", self.GetCurrentUrlPath())
# Enter a timerange.
self.Type("css=grr-form-datetime input", "2012-12-21 12:34")
self.Type("css=grr-form-duration input", "2w")
self.Click("css=button:contains('Show report')")
# Reports that require timeranges include nondefault values in the url when
# `Show report' has been clicked.
self.WaitUntil(lambda: "start_time" in self.GetCurrentUrlPath())
self.assertIn("duration", self.GetCurrentUrlPath())
# Select a different report.
self.Click("css=#LastActiveReportPlugin_anchor i.jstree-icon")
self.WaitUntil(self.IsTextPresent, "Client | Last Active")
# The default label isn't included in the url.
self.WaitUntilNot(lambda: "bar" in self.GetCurrentUrlPath())
# Select a client label.
self.Select("css=grr-report select", "bar")
self.Click("css=button:contains('Show report')")
# Reports that require labels include them in the url after `Show report'
# has been clicked.
self.WaitUntil(lambda: "bar" in self.GetCurrentUrlPath())
# Reports that dont require timeranges don't mention them in the url.
self.assertNotIn("start_time", self.GetCurrentUrlPath())
self.assertNotIn("duration", self.GetCurrentUrlPath())
# Select a different report.
self.Click("css=#GRRVersion7ReportPlugin_anchor i.jstree-icon")
self.WaitUntil(self.IsTextPresent, "Active Clients - 7 Days Active")
# The label is cleared when report type is changed.
self.WaitUntilNot(lambda: "bar" in self.GetCurrentUrlPath())
self.assertNotIn("start_time", self.GetCurrentUrlPath())
self.assertNotIn("duration", self.GetCurrentUrlPath())
class TestDateTimeInput(gui_test_lib.GRRSeleniumTest):
"""Tests datetime-form-directive."""
def testInputAllowsInvalidText(self):
# Make "test" user an admin.
self.CreateAdminUser(u"test")
# Open any page that shows the datetime-form-directive.
self.Open("/#/stats/HuntApprovalsReportPlugin")
datetime_input = self.WaitUntil(self.GetVisibleElement,
"css=grr-form-datetime input")
value = datetime_input.get_attribute("value")
self.assertRegexpMatches(value, r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}")
self.assertStartsWith(value, "20")
datetime_input.send_keys(keys.Keys.BACKSPACE)
self.WaitUntilNot(self.IsTextPresent, value)
self.assertEqual(value[:-1], datetime_input.get_attribute("value"))
if __name__ == "__main__":
app.run(test_lib.main)
| apache-2.0 | -8,424,589,681,675,763,000 | 34.90625 | 79 | 0.708442 | false |
rahuldan/sympy | sympy/sets/fancysets.py | 2 | 46732 | from __future__ import print_function, division
from sympy.logic.boolalg import And
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.compatibility import as_int, with_metaclass, range, PY3
from sympy.core.expr import Expr
from sympy.core.function import Lambda, _coeff_isneg
from sympy.core.singleton import Singleton, S
from sympy.core.symbol import Dummy, symbols, Wild
from sympy.core.sympify import _sympify, sympify, converter
from sympy.sets.sets import (Set, Interval, Intersection, EmptySet, Union,
FiniteSet, imageset)
from sympy.sets.conditionset import ConditionSet
from sympy.utilities.misc import filldedent, func_name
class Naturals(with_metaclass(Singleton, Set)):
"""
Represents the natural numbers (or counting numbers) which are all
positive integers starting from 1. This set is also available as
the Singleton, S.Naturals.
Examples
========
>>> from sympy import S, Interval, pprint
>>> 5 in S.Naturals
True
>>> iterable = iter(S.Naturals)
>>> next(iterable)
1
>>> next(iterable)
2
>>> next(iterable)
3
>>> pprint(S.Naturals.intersect(Interval(0, 10)))
{1, 2, ..., 10}
See Also
========
Naturals0 : non-negative integers (i.e. includes 0, too)
Integers : also includes negative integers
"""
is_iterable = True
_inf = S.One
_sup = S.Infinity
def _intersect(self, other):
if other.is_Interval:
return Intersection(
S.Integers, other, Interval(self._inf, S.Infinity))
return None
def _contains(self, other):
if other.is_positive and other.is_integer:
return S.true
elif other.is_integer is False or other.is_positive is False:
return S.false
def __iter__(self):
i = self._inf
while True:
yield i
i = i + 1
@property
def _boundary(self):
return self
class Naturals0(Naturals):
"""Represents the whole numbers which are all the non-negative integers,
inclusive of zero.
See Also
========
Naturals : positive integers; does not include 0
Integers : also includes the negative integers
"""
_inf = S.Zero
def _contains(self, other):
if other.is_integer and other.is_nonnegative:
return S.true
elif other.is_integer is False or other.is_nonnegative is False:
return S.false
class Integers(with_metaclass(Singleton, Set)):
"""
Represents all integers: positive, negative and zero. This set is also
available as the Singleton, S.Integers.
Examples
========
>>> from sympy import S, Interval, pprint
    >>> 5 in S.Integers
True
>>> iterable = iter(S.Integers)
>>> next(iterable)
0
>>> next(iterable)
1
>>> next(iterable)
-1
>>> next(iterable)
2
>>> pprint(S.Integers.intersect(Interval(-4, 4)))
{-4, -3, ..., 4}
See Also
========
Naturals0 : non-negative integers
    Naturals : positive integers; does not include 0
"""
is_iterable = True
def _intersect(self, other):
from sympy.functions.elementary.integers import floor, ceiling
if other is Interval(S.NegativeInfinity, S.Infinity) or other is S.Reals:
return self
elif other.is_Interval:
s = Range(ceiling(other.left), floor(other.right) + 1)
return s.intersect(other) # take out endpoints if open interval
return None
def _contains(self, other):
if other.is_integer:
return S.true
elif other.is_integer is False:
return S.false
def __iter__(self):
yield S.Zero
i = S.One
while True:
yield i
yield -i
i = i + 1
@property
def _inf(self):
return -S.Infinity
@property
def _sup(self):
return S.Infinity
@property
def _boundary(self):
return self
def _eval_imageset(self, f):
expr = f.expr
if not isinstance(expr, Expr):
return
if len(f.variables) > 1:
return
n = f.variables[0]
# f(x) + c and f(-x) + c cover the same integers
# so choose the form that has the fewest negatives
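        # e.g. for f(n) = -3*n + 2: c = 2, f(n) - c = -3*n has one negative
        # coefficient while f(-n) - c = 3*n has none, so the canonical form
        # 3*n + 2 is used; both describe the set {..., -4, -1, 2, 5, ...}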
c = f(0)
fx = f(n) - c
f_x = f(-n) - c
neg_count = lambda e: sum(_coeff_isneg(_) for _ in Add.make_args(e))
if neg_count(f_x) < neg_count(fx):
expr = f_x + c
a = Wild('a', exclude=[n])
b = Wild('b', exclude=[n])
match = expr.match(a*n + b)
if match and match[a]:
# canonical shift
expr = match[a]*n + match[b] % match[a]
if expr != f.expr:
return ImageSet(Lambda(n, expr), S.Integers)
class Reals(with_metaclass(Singleton, Interval)):
def __new__(cls):
return Interval.__new__(cls, -S.Infinity, S.Infinity)
def __eq__(self, other):
return other == Interval(-S.Infinity, S.Infinity)
def __hash__(self):
return hash(Interval(-S.Infinity, S.Infinity))
class ImageSet(Set):
"""
Image of a set under a mathematical function. The transformation
must be given as a Lambda function which has as many arguments
as the elements of the set upon which it operates, e.g. 1 argument
when acting on the set of integers or 2 arguments when acting on
a complex region.
This function is not normally called directly, but is called
from `imageset`.
Examples
========
>>> from sympy import Symbol, S, pi, Dummy, Lambda
>>> from sympy.sets.sets import FiniteSet, Interval
>>> from sympy.sets.fancysets import ImageSet
>>> x = Symbol('x')
>>> N = S.Naturals
>>> squares = ImageSet(Lambda(x, x**2), N) # {x**2 for x in N}
>>> 4 in squares
True
>>> 5 in squares
False
>>> FiniteSet(0, 1, 2, 3, 4, 5, 6, 7, 9, 10).intersect(squares)
{1, 4, 9}
>>> square_iterable = iter(squares)
>>> for i in range(4):
... next(square_iterable)
1
4
9
16
>>> n = Dummy('n')
>>> solutions = ImageSet(Lambda(n, n*pi), S.Integers) # solutions of sin(x) = 0
>>> dom = Interval(-1, 1)
>>> dom.intersect(solutions)
{0}
See Also
========
sympy.sets.sets.imageset
"""
def __new__(cls, lamda, base_set):
if not isinstance(lamda, Lambda):
raise ValueError('first argument must be a Lambda')
if lamda is S.IdentityFunction:
return base_set
if not lamda.expr.free_symbols or not lamda.expr.args:
return FiniteSet(lamda.expr)
return Basic.__new__(cls, lamda, base_set)
lamda = property(lambda self: self.args[0])
base_set = property(lambda self: self.args[1])
def __iter__(self):
already_seen = set()
for i in self.base_set:
val = self.lamda(i)
if val in already_seen:
continue
else:
already_seen.add(val)
yield val
def _is_multivariate(self):
return len(self.lamda.variables) > 1
def _contains(self, other):
from sympy.matrices import Matrix
from sympy.solvers.solveset import solveset, linsolve
from sympy.utilities.iterables import is_sequence, iterable, cartes
L = self.lamda
if is_sequence(other):
if not is_sequence(L.expr):
return S.false
if len(L.expr) != len(other):
raise ValueError(filldedent('''
Dimensions of other and output of Lambda are different.'''))
elif iterable(other):
raise ValueError(filldedent('''
`other` should be an ordered object like a Tuple.'''))
solns = None
if self._is_multivariate():
if not is_sequence(L.expr):
# exprs -> (numer, denom) and check again
# XXX this is a bad idea -- make the user
# remap self to desired form
return other.as_numer_denom() in self.func(
Lambda(L.variables, L.expr.as_numer_denom()), self.base_set)
eqs = [expr - val for val, expr in zip(other, L.expr)]
variables = L.variables
free = set(variables)
if all(i.is_number for i in list(Matrix(eqs).jacobian(variables))):
solns = list(linsolve([e - val for e, val in
zip(L.expr, other)], variables))
else:
syms = [e.free_symbols & free for e in eqs]
solns = {}
for i, (e, s, v) in enumerate(zip(eqs, syms, other)):
if not s:
if e != v:
return S.false
                        solns[variables[i]] = [v]
continue
elif len(s) == 1:
sy = s.pop()
sol = solveset(e, sy)
if sol is S.EmptySet:
return S.false
elif isinstance(sol, FiniteSet):
solns[sy] = list(sol)
else:
raise NotImplementedError
else:
raise NotImplementedError
solns = cartes(*[solns[s] for s in variables])
else:
x = L.variables[0]
if isinstance(L.expr, Expr):
# scalar -> scalar mapping
solnsSet = solveset(L.expr - other, x)
if solnsSet.is_FiniteSet:
solns = list(solnsSet)
else:
msgset = solnsSet
else:
# scalar -> vector
for e, o in zip(L.expr, other):
solns = solveset(e - o, x)
if solns is S.EmptySet:
return S.false
for soln in solns:
try:
if soln in self.base_set:
break # check next pair
except TypeError:
if self.base_set.contains(soln.evalf()):
break
else:
return S.false # never broke so there was no True
return S.true
if solns is None:
raise NotImplementedError(filldedent('''
Determining whether %s contains %s has not
been implemented.''' % (msgset, other)))
for soln in solns:
try:
if soln in self.base_set:
return S.true
except TypeError:
return self.base_set.contains(soln.evalf())
return S.false
@property
def is_iterable(self):
return self.base_set.is_iterable
def _intersect(self, other):
from sympy.solvers.diophantine import diophantine
if self.base_set is S.Integers:
g = None
if isinstance(other, ImageSet) and other.base_set is S.Integers:
g = other.lamda.expr
m = other.lamda.variables[0]
elif other is S.Integers:
m = g = Dummy('x')
if g is not None:
f = self.lamda.expr
n = self.lamda.variables[0]
                # Diophantine sorts the solutions according to the alphabetic
                # order of the variable names. Since the result should not depend
                # on the variable name, they are replaced by the dummy variables
                # below.
a, b = Dummy('a'), Dummy('b')
f, g = f.subs(n, a), g.subs(m, b)
solns_set = diophantine(f - g)
if solns_set == set():
return EmptySet()
solns = list(diophantine(f - g))
if len(solns) != 1:
return
# since 'a' < 'b', select soln for n
nsol = solns[0][0]
t = nsol.free_symbols.pop()
return imageset(Lambda(n, f.subs(a, nsol.subs(t, n))), S.Integers)
if other == S.Reals:
from sympy.solvers.solveset import solveset_real
from sympy.core.function import expand_complex
if len(self.lamda.variables) > 1:
return None
f = self.lamda.expr
n = self.lamda.variables[0]
n_ = Dummy(n.name, real=True)
f_ = f.subs(n, n_)
re, im = f_.as_real_imag()
im = expand_complex(im)
return imageset(Lambda(n_, re),
self.base_set.intersect(
solveset_real(im, n_)))
elif isinstance(other, Interval):
from sympy.solvers.solveset import (invert_real, invert_complex,
solveset)
f = self.lamda.expr
n = self.lamda.variables[0]
base_set = self.base_set
new_inf, new_sup = None, None
if f.is_real:
inverter = invert_real
else:
inverter = invert_complex
g1, h1 = inverter(f, other.inf, n)
g2, h2 = inverter(f, other.sup, n)
if all(isinstance(i, FiniteSet) for i in (h1, h2)):
if g1 == n:
if len(h1) == 1:
new_inf = h1.args[0]
if g2 == n:
if len(h2) == 1:
new_sup = h2.args[0]
# TODO: Design a technique to handle multiple-inverse
# functions
# Any of the new boundary values cannot be determined
if any(i is None for i in (new_sup, new_inf)):
return
range_set = S.EmptySet
if all(i.is_real for i in (new_sup, new_inf)):
new_interval = Interval(new_inf, new_sup)
range_set = base_set._intersect(new_interval)
else:
if other.is_subset(S.Reals):
solutions = solveset(f, n, S.Reals)
if not isinstance(range_set, (ImageSet, ConditionSet)):
range_set = solutions._intersect(other)
else:
return
if range_set is S.EmptySet:
return S.EmptySet
elif isinstance(range_set, Range) and range_set.size is not S.Infinity:
range_set = FiniteSet(*list(range_set))
if range_set is not None:
return imageset(Lambda(n, f), range_set)
return
else:
return
class Range(Set):
"""
Represents a range of integers. Can be called as Range(stop),
    Range(start, stop), or Range(start, stop, step); when step is
    not given it defaults to 1.
    `Range(stop)` is the same as `Range(0, stop, 1)` and the stop value
    (just as for Python ranges) is not included in the Range values.
>>> from sympy import Range
>>> list(Range(3))
[0, 1, 2]
The step can also be negative:
>>> list(Range(10, 0, -2))
[10, 8, 6, 4, 2]
The stop value is made canonical so equivalent ranges always
have the same args:
>>> Range(0, 10, 3)
Range(0, 12, 3)
Infinite ranges are allowed. If the starting point is infinite,
then the final value is ``stop - step``. To iterate such a range,
it needs to be reversed:
>>> from sympy import oo
>>> r = Range(-oo, 1)
>>> r[-1]
0
>>> next(iter(r))
Traceback (most recent call last):
...
ValueError: Cannot iterate over Range with infinite start
>>> next(iter(r.reversed))
0
Although Range is a set (and supports the normal set
operations) it maintains the order of the elements and can
be used in contexts where `range` would be used.
>>> from sympy import Interval
>>> Range(0, 10, 2).intersect(Interval(3, 7))
Range(4, 8, 2)
>>> list(_)
[4, 6]
    Although slicing of a Range will always return a Range -- possibly
empty -- an empty set will be returned from any intersection that
is empty:
>>> Range(3)[:0]
Range(0, 0, 1)
>>> Range(3).intersect(Interval(4, oo))
EmptySet()
>>> Range(3).intersect(Range(4, oo))
EmptySet()
"""
is_iterable = True
def __new__(cls, *args):
from sympy.functions.elementary.integers import ceiling
if len(args) == 1:
if isinstance(args[0], range if PY3 else xrange):
args = args[0].__reduce__()[1] # use pickle method
# expand range
slc = slice(*args)
if slc.step == 0:
raise ValueError("step cannot be 0")
start, stop, step = slc.start or 0, slc.stop, slc.step or 1
try:
start, stop, step = [
w if w in [S.NegativeInfinity, S.Infinity]
else sympify(as_int(w))
for w in (start, stop, step)]
except ValueError:
raise ValueError(filldedent('''
Finite arguments to Range must be integers; `imageset` can define
other cases, e.g. use `imageset(i, i/10, Range(3))` to give
[0, 1/10, 1/5].'''))
if not step.is_Integer:
raise ValueError(filldedent('''
Ranges must have a literal integer step.'''))
if all(i.is_infinite for i in (start, stop)):
if start == stop:
# canonical null handled below
start = stop = S.One
else:
raise ValueError(filldedent('''
Either the start or end value of the Range must be finite.'''))
if start.is_infinite:
end = stop
else:
ref = start if start.is_finite else stop
n = ceiling((stop - ref)/step)
if n <= 0:
# null Range
start = end = 0
step = 1
else:
end = ref + n*step
return Basic.__new__(cls, start, end, step)
start = property(lambda self: self.args[0])
stop = property(lambda self: self.args[1])
step = property(lambda self: self.args[2])
@property
def reversed(self):
"""Return an equivalent Range in the opposite order.
Examples
========
>>> from sympy import Range
>>> Range(10).reversed
Range(9, -1, -1)
"""
if not self:
return self
return self.func(
self.stop - self.step, self.start - self.step, -self.step)
def _intersect(self, other):
from sympy.functions.elementary.integers import ceiling, floor
from sympy.functions.elementary.complexes import sign
if other is S.Naturals:
return self._intersect(Interval(1, S.Infinity))
if other is S.Integers:
return self
if other.is_Interval:
if not all(i.is_number for i in other.args[:2]):
return
# In case of null Range, return an EmptySet.
if self.size == 0:
return S.EmptySet
# trim down to self's size, and represent
# as a Range with step 1.
start = ceiling(max(other.inf, self.inf))
if start not in other:
start += 1
end = floor(min(other.sup, self.sup))
if end not in other:
end -= 1
return self.intersect(Range(start, end + 1))
if isinstance(other, Range):
from sympy.solvers.diophantine import diop_linear
from sympy.core.numbers import ilcm
# non-overlap quick exits
if not other:
return S.EmptySet
if not self:
return S.EmptySet
if other.sup < self.inf:
return S.EmptySet
if other.inf > self.sup:
return S.EmptySet
# work with finite end at the start
r1 = self
if r1.start.is_infinite:
r1 = r1.reversed
r2 = other
if r2.start.is_infinite:
r2 = r2.reversed
# this equation represents the values of the Range;
# it's a linear equation
eq = lambda r, i: r.start + i*r.step
# we want to know when the two equations might
# have integer solutions so we use the diophantine
# solver
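            # e.g. for Range(0, 10, 2) and Range(3, 17, 3) the equation
            # 2*a - (3 + 3*b) = 0 has integer solutions (one valid
            # parametrization is a = 3*t + 3, b = 2*t + 1), so the ranges
            # do intersect (here only at the value 6)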
a, b = diop_linear(eq(r1, Dummy()) - eq(r2, Dummy()))
# check for no solution
no_solution = a is None and b is None
if no_solution:
return S.EmptySet
# there is a solution
# -------------------
# find the coincident point, c
a0 = a.as_coeff_Add()[0]
c = eq(r1, a0)
# find the first point, if possible, in each range
# since c may not be that point
def _first_finite_point(r1, c):
if c == r1.start:
return c
# st is the signed step we need to take to
# get from c to r1.start
st = sign(r1.start - c)*step
# use Range to calculate the first point:
# we want to get as close as possible to
# r1.start; the Range will not be null since
# it will at least contain c
s1 = Range(c, r1.start + st, st)[-1]
if s1 == r1.start:
pass
else:
# if we didn't hit r1.start then, if the
# sign of st didn't match the sign of r1.step
# we are off by one and s1 is not in r1
if sign(r1.step) != sign(st):
s1 -= st
if s1 not in r1:
return
return s1
# calculate the step size of the new Range
step = abs(ilcm(r1.step, r2.step))
s1 = _first_finite_point(r1, c)
if s1 is None:
return S.EmptySet
s2 = _first_finite_point(r2, c)
if s2 is None:
return S.EmptySet
# replace the corresponding start or stop in
# the original Ranges with these points; the
# result must have at least one point since
# we know that s1 and s2 are in the Ranges
def _updated_range(r, first):
st = sign(r.step)*step
if r.start.is_finite:
rv = Range(first, r.stop, st)
else:
rv = Range(r.start, first + st, st)
return rv
r1 = _updated_range(self, s1)
r2 = _updated_range(other, s2)
# work with them both in the increasing direction
if sign(r1.step) < 0:
r1 = r1.reversed
if sign(r2.step) < 0:
r2 = r2.reversed
# return clipped Range with positive step; it
# can't be empty at this point
start = max(r1.start, r2.start)
stop = min(r1.stop, r2.stop)
return Range(start, stop, step)
else:
return
def _contains(self, other):
if not self:
return S.false
if other.is_infinite:
return S.false
if not other.is_integer:
return other.is_integer
ref = self.start if self.start.is_finite else self.stop
if (ref - other) % self.step: # off sequence
return S.false
return _sympify(other >= self.inf and other <= self.sup)
def __iter__(self):
if self.start in [S.NegativeInfinity, S.Infinity]:
raise ValueError("Cannot iterate over Range with infinite start")
elif self:
i = self.start
step = self.step
while True:
if (step > 0 and not (self.start <= i < self.stop)) or \
(step < 0 and not (self.stop < i <= self.start)):
break
yield i
i += step
def __len__(self):
if not self:
return 0
dif = self.stop - self.start
if dif.is_infinite:
raise ValueError(
"Use .size to get the length of an infinite Range")
return abs(dif//self.step)
@property
def size(self):
try:
return _sympify(len(self))
except ValueError:
return S.Infinity
def __nonzero__(self):
return self.start != self.stop
__bool__ = __nonzero__
def __getitem__(self, i):
from sympy.functions.elementary.integers import ceiling
ooslice = "cannot slice from the end with an infinite value"
zerostep = "slice step cannot be zero"
# if we had to take every other element in the following
# oo, ..., 6, 4, 2, 0
# we might get oo, ..., 4, 0 or oo, ..., 6, 2
ambiguous = "cannot unambiguously re-stride from the end " + \
"with an infinite value"
if isinstance(i, slice):
if self.size.is_finite:
start, stop, step = i.indices(self.size)
n = ceiling((stop - start)/step)
if n <= 0:
return Range(0)
canonical_stop = start + n*step
end = canonical_stop - step
ss = step*self.step
return Range(self[start], self[end] + ss, ss)
else: # infinite Range
start = i.start
stop = i.stop
if i.step == 0:
raise ValueError(zerostep)
step = i.step or 1
ss = step*self.step
#---------------------
# handle infinite on right
# e.g. Range(0, oo) or Range(0, -oo, -1)
# --------------------
if self.stop.is_infinite:
# start and stop are not interdependent --
# they only depend on step --so we use the
# equivalent reversed values
return self.reversed[
stop if stop is None else -stop + 1:
start if start is None else -start:
step].reversed
#---------------------
# handle infinite on the left
# e.g. Range(oo, 0, -1) or Range(-oo, 0)
# --------------------
# consider combinations of
# start/stop {== None, < 0, == 0, > 0} and
# step {< 0, > 0}
if start is None:
if stop is None:
if step < 0:
return Range(self[-1], self.start, ss)
elif step > 1:
raise ValueError(ambiguous)
else: # == 1
return self
elif stop < 0:
if step < 0:
return Range(self[-1], self[stop], ss)
else: # > 0
return Range(self.start, self[stop], ss)
elif stop == 0:
if step > 0:
return Range(0)
else: # < 0
raise ValueError(ooslice)
elif stop == 1:
if step > 0:
raise ValueError(ooslice) # infinite singleton
else: # < 0
raise ValueError(ooslice)
else: # > 1
raise ValueError(ooslice)
elif start < 0:
if stop is None:
if step < 0:
return Range(self[start], self.start, ss)
else: # > 0
return Range(self[start], self.stop, ss)
elif stop < 0:
return Range(self[start], self[stop], ss)
elif stop == 0:
if step < 0:
raise ValueError(ooslice)
else: # > 0
return Range(0)
elif stop > 0:
raise ValueError(ooslice)
elif start == 0:
if stop is None:
if step < 0:
raise ValueError(ooslice) # infinite singleton
elif step > 1:
raise ValueError(ambiguous)
else: # == 1
return self
elif stop < 0:
if step > 1:
raise ValueError(ambiguous)
elif step == 1:
return Range(self.start, self[stop], ss)
else: # < 0
return Range(0)
else: # >= 0
raise ValueError(ooslice)
elif start > 0:
raise ValueError(ooslice)
else:
if not self:
raise IndexError('Range index out of range')
if i == 0:
return self.start
if i == -1 or i is S.Infinity:
return self.stop - self.step
rv = (self.stop if i < 0 else self.start) + i*self.step
if rv.is_infinite:
raise ValueError(ooslice)
if rv < self.inf or rv > self.sup:
raise IndexError("Range index out of range")
return rv
def _eval_imageset(self, f):
from sympy.core.function import expand_mul
if not self:
return S.EmptySet
if not isinstance(f.expr, Expr):
return
if self.size == 1:
return FiniteSet(f(self[0]))
if f is S.IdentityFunction:
return self
x = f.variables[0]
expr = f.expr
# handle f that is linear in f's variable
if x not in expr.free_symbols or x in expr.diff(x).free_symbols:
return
if self.start.is_finite:
F = f(self.step*x + self.start) # for i in range(len(self))
else:
F = f(-self.step*x + self[-1])
F = expand_mul(F)
if F != expr:
return imageset(x, F, Range(self.size))
@property
def _inf(self):
if not self:
raise NotImplementedError
if self.step > 0:
return self.start
else:
return self.stop - self.step
@property
def _sup(self):
if not self:
raise NotImplementedError
if self.step > 0:
return self.stop - self.step
else:
return self.start
@property
def _boundary(self):
return self
if PY3:
converter[range] = Range
else:
converter[xrange] = Range
def normalize_theta_set(theta):
"""
    Normalize a Real Set `theta` in the Interval [0, 2*pi). It returns
    a normalized value of theta in the Set. For an Interval, at most
    one cycle [0, 2*pi) is returned, i.e. for theta equal to [0, 10*pi]
    the returned normalized value would be [0, 2*pi). As of now, intervals
    with end points that are not multiples of `pi` are not supported.
Raises
======
NotImplementedError
The algorithms for Normalizing theta Set are not yet
implemented.
ValueError
The input is not valid, i.e. the input is not a real set.
RuntimeError
It is a bug, please report to the github issue tracker.
Examples
========
>>> from sympy.sets.fancysets import normalize_theta_set
>>> from sympy import Interval, FiniteSet, pi
>>> normalize_theta_set(Interval(9*pi/2, 5*pi))
[pi/2, pi]
>>> normalize_theta_set(Interval(-3*pi/2, pi/2))
[0, 2*pi)
>>> normalize_theta_set(Interval(-pi/2, pi/2))
[0, pi/2] U [3*pi/2, 2*pi)
>>> normalize_theta_set(Interval(-4*pi, 3*pi))
[0, 2*pi)
>>> normalize_theta_set(Interval(-3*pi/2, -pi/2))
[pi/2, 3*pi/2]
>>> normalize_theta_set(FiniteSet(0, pi, 3*pi))
{0, pi}
"""
from sympy.functions.elementary.trigonometric import _pi_coeff as coeff
if theta.is_Interval:
interval_len = theta.measure
# one complete circle
if interval_len >= 2*S.Pi:
if interval_len == 2*S.Pi and theta.left_open and theta.right_open:
k = coeff(theta.start)
return Union(Interval(0, k*S.Pi, False, True),
Interval(k*S.Pi, 2*S.Pi, True, True))
return Interval(0, 2*S.Pi, False, True)
k_start, k_end = coeff(theta.start), coeff(theta.end)
if k_start is None or k_end is None:
raise NotImplementedError("Normalizing theta without pi as coefficient is "
"not yet implemented")
new_start = k_start*S.Pi
new_end = k_end*S.Pi
if new_start > new_end:
return Union(Interval(S.Zero, new_end, False, theta.right_open),
Interval(new_start, 2*S.Pi, theta.left_open, True))
else:
return Interval(new_start, new_end, theta.left_open, theta.right_open)
elif theta.is_FiniteSet:
new_theta = []
for element in theta:
k = coeff(element)
if k is None:
raise NotImplementedError('Normalizing theta without pi as '
'coefficient, is not Implemented.')
else:
new_theta.append(k*S.Pi)
return FiniteSet(*new_theta)
elif theta.is_Union:
return Union(*[normalize_theta_set(interval) for interval in theta.args])
elif theta.is_subset(S.Reals):
raise NotImplementedError("Normalizing theta when, it is of type %s is not "
"implemented" % type(theta))
else:
raise ValueError(" %s is not a real set" % (theta))
class ComplexRegion(Set):
"""
Represents the Set of all Complex Numbers. It can represent a
region of Complex Plane in both the standard forms Polar and
Rectangular coordinates.
* Polar Form
Input is in the form of the ProductSet or Union of ProductSets
of the intervals of r and theta, & use the flag polar=True.
Z = {z in C | z = r*[cos(theta) + I*sin(theta)], r in [r], theta in [theta]}
* Rectangular Form
Input is in the form of the ProductSet or Union of ProductSets
of interval of x and y the of the Complex numbers in a Plane.
Default input type is in rectangular form.
Z = {z in C | z = x + I*y, x in [Re(z)], y in [Im(z)]}
Examples
========
>>> from sympy.sets.fancysets import ComplexRegion
>>> from sympy.sets import Interval
>>> from sympy import S, I, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 6)
>>> c = Interval(1, 8)
>>> c1 = ComplexRegion(a*b) # Rectangular Form
>>> c1
ComplexRegion([2, 3] x [4, 6], False)
* c1 represents the rectangular region in complex plane
surrounded by the coordinates (2, 4), (3, 4), (3, 6) and
(2, 6), of the four vertices.
>>> c2 = ComplexRegion(Union(a*b, b*c))
>>> c2
ComplexRegion([2, 3] x [4, 6] U [4, 6] x [1, 8], False)
* c2 represents the Union of two rectangular regions in complex
plane. One of them surrounded by the coordinates of c1 and
other surrounded by the coordinates (4, 1), (6, 1), (6, 8) and
(4, 8).
>>> 2.5 + 4.5*I in c1
True
>>> 2.5 + 6.5*I in c1
False
>>> r = Interval(0, 1)
>>> theta = Interval(0, 2*S.Pi)
>>> c2 = ComplexRegion(r*theta, polar=True) # Polar Form
>>> c2 # unit Disk
ComplexRegion([0, 1] x [0, 2*pi), True)
* c2 represents the region in complex plane inside the
Unit Disk centered at the origin.
>>> 0.5 + 0.5*I in c2
True
>>> 1 + 2*I in c2
False
>>> unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True)
>>> upper_half_unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, S.Pi), polar=True)
>>> intersection = unit_disk.intersect(upper_half_unit_disk)
>>> intersection
ComplexRegion([0, 1] x [0, pi], True)
>>> intersection == upper_half_unit_disk
True
See Also
========
Reals
"""
is_ComplexRegion = True
def __new__(cls, sets, polar=False):
from sympy import sin, cos
x, y, r, theta = symbols('x, y, r, theta', cls=Dummy)
I = S.ImaginaryUnit
polar = sympify(polar)
# Rectangular Form
if polar == False:
if all(_a.is_FiniteSet for _a in sets.args) and (len(sets.args) == 2):
# ** ProductSet of FiniteSets in the Complex Plane. **
# For Cases like ComplexRegion({2, 4}*{3}), It
# would return {2 + 3*I, 4 + 3*I}
complex_num = []
for x in sets.args[0]:
for y in sets.args[1]:
complex_num.append(x + I*y)
obj = FiniteSet(*complex_num)
else:
obj = ImageSet.__new__(cls, Lambda((x, y), x + I*y), sets)
obj._variables = (x, y)
obj._expr = x + I*y
# Polar Form
elif polar == True:
new_sets = []
# sets is Union of ProductSets
if not sets.is_ProductSet:
for k in sets.args:
new_sets.append(k)
# sets is ProductSets
else:
new_sets.append(sets)
# Normalize input theta
for k, v in enumerate(new_sets):
from sympy.sets import ProductSet
new_sets[k] = ProductSet(v.args[0],
normalize_theta_set(v.args[1]))
sets = Union(*new_sets)
obj = ImageSet.__new__(cls, Lambda((r, theta),
r*(cos(theta) + I*sin(theta))),
sets)
obj._variables = (r, theta)
obj._expr = r*(cos(theta) + I*sin(theta))
else:
raise ValueError("polar should be either True or False")
obj._sets = sets
obj._polar = polar
return obj
@property
def sets(self):
"""
Return raw input sets to the self.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.sets
[2, 3] x [4, 5]
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.sets
[2, 3] x [4, 5] U [4, 5] x [1, 7]
"""
return self._sets
@property
def args(self):
return (self._sets, self._polar)
@property
def variables(self):
return self._variables
@property
def expr(self):
return self._expr
@property
def psets(self):
"""
Return a tuple of sets (ProductSets) input of the self.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.psets
([2, 3] x [4, 5],)
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.psets
([2, 3] x [4, 5], [4, 5] x [1, 7])
"""
if self.sets.is_ProductSet:
psets = ()
psets = psets + (self.sets, )
else:
psets = self.sets.args
return psets
@property
def a_interval(self):
"""
Return the union of intervals of `x` when, self is in
rectangular form, or the union of intervals of `r` when
self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.a_interval
[2, 3]
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.a_interval
[2, 3] U [4, 5]
"""
a_interval = []
for element in self.psets:
a_interval.append(element.args[0])
a_interval = Union(*a_interval)
return a_interval
@property
def b_interval(self):
"""
Return the union of intervals of `y` when, self is in
rectangular form, or the union of intervals of `theta`
when self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.b_interval
[4, 5]
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.b_interval
[1, 7]
"""
b_interval = []
for element in self.psets:
b_interval.append(element.args[1])
b_interval = Union(*b_interval)
return b_interval
@property
def polar(self):
"""
Returns True if self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union, S
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> theta = Interval(0, 2*S.Pi)
>>> C1 = ComplexRegion(a*b)
>>> C1.polar
False
>>> C2 = ComplexRegion(a*theta, polar=True)
>>> C2.polar
True
"""
return self._polar
@property
def _measure(self):
"""
The measure of self.sets.
Examples
========
>>> from sympy import Interval, ComplexRegion, S
>>> a, b = Interval(2, 5), Interval(4, 8)
>>> c = Interval(0, 2*S.Pi)
>>> c1 = ComplexRegion(a*b)
>>> c1.measure
12
>>> c2 = ComplexRegion(a*c, polar=True)
>>> c2.measure
6*pi
"""
return self.sets._measure
def _contains(self, other):
from sympy.functions import arg, Abs
from sympy.core.containers import Tuple
other = sympify(other)
isTuple = isinstance(other, Tuple)
if isTuple and len(other) != 2:
raise ValueError('expecting Tuple of length 2')
# self in rectangular form
if not self.polar:
re, im = other if isTuple else other.as_real_imag()
for element in self.psets:
if And(element.args[0]._contains(re),
element.args[1]._contains(im)):
return True
return False
# self in polar form
elif self.polar:
if isTuple:
r, theta = other
elif other.is_zero:
r, theta = S.Zero, S.Zero
else:
r, theta = Abs(other), arg(other)
for element in self.psets:
if And(element.args[0]._contains(r),
element.args[1]._contains(theta)):
return True
return False
def _intersect(self, other):
if other.is_ComplexRegion:
# self in rectangular form
if (not self.polar) and (not other.polar):
return ComplexRegion(Intersection(self.sets, other.sets))
# self in polar form
elif self.polar and other.polar:
r1, theta1 = self.a_interval, self.b_interval
r2, theta2 = other.a_interval, other.b_interval
new_r_interval = Intersection(r1, r2)
new_theta_interval = Intersection(theta1, theta2)
# 0 and 2*Pi means the same
if ((2*S.Pi in theta1 and S.Zero in theta2) or
(2*S.Pi in theta2 and S.Zero in theta1)):
new_theta_interval = Union(new_theta_interval,
FiniteSet(0))
return ComplexRegion(new_r_interval*new_theta_interval,
polar=True)
if other is S.Reals:
return other
if other.is_subset(S.Reals):
new_interval = []
# self in rectangular form
if not self.polar:
for element in self.psets:
if S.Zero in element.args[0]:
new_interval.append(element.args[0])
new_interval = Union(*new_interval)
return Intersection(new_interval, other)
# self in polar form
elif self.polar:
for element in self.psets:
if (0 in element.args[1]) or (S.Pi in element.args[1]):
new_interval.append(element.args[0])
new_interval = Union(*new_interval)
return Intersection(new_interval, other)
def _union(self, other):
if other.is_ComplexRegion:
# self in rectangular form
if (not self.polar) and (not other.polar):
return ComplexRegion(Union(self.sets, other.sets))
# self in polar form
elif self.polar and other.polar:
return ComplexRegion(Union(self.sets, other.sets), polar=True)
if self == S.Complexes:
return self
return None
class Complexes(with_metaclass(Singleton, ComplexRegion)):
def __new__(cls):
return ComplexRegion.__new__(cls, S.Reals*S.Reals)
def __eq__(self, other):
return other == ComplexRegion(S.Reals*S.Reals)
def __hash__(self):
return hash(ComplexRegion(S.Reals*S.Reals))
def __str__(self):
return "S.Complexes"
def __repr__(self):
return "S.Complexes"
| bsd-3-clause | -3,314,107,003,832,846,000 | 31.430257 | 90 | 0.499444 | false |
himanshu-setia/keystone | keystone/tests/unit/test_v3_federation.py | 1 | 149331 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import random
from testtools import matchers
import uuid
import fixtures
from lxml import etree
import mock
from oslo_config import cfg
from oslo_log import versionutils
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslotest import mockpatch
import saml2
from saml2 import saml
from saml2 import sigver
from six.moves import http_client
from six.moves import range, urllib, zip
xmldsig = importutils.try_import("saml2.xmldsig")
if not xmldsig:
xmldsig = importutils.try_import("xmldsig")
from keystone.auth import controllers as auth_controllers
from keystone.common import environment
from keystone.contrib.federation import routers
from keystone import exception
from keystone.federation import controllers as federation_controllers
from keystone.federation import idp as keystone_idp
from keystone import notifications
from keystone.tests import unit
from keystone.tests.unit import core
from keystone.tests.unit import federation_fixtures
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import mapping_fixtures
from keystone.tests.unit import test_v3
from keystone.tests.unit import utils
from keystone.token.providers import common as token_common
subprocess = environment.subprocess
CONF = cfg.CONF
ROOTDIR = os.path.dirname(os.path.abspath(__file__))
XMLDIR = os.path.join(ROOTDIR, 'saml2/')
def dummy_validator(*args, **kwargs):
pass
class FederationTests(test_v3.RestfulTestCase):
@mock.patch.object(versionutils, 'report_deprecated_feature')
def test_exception_happens(self, mock_deprecator):
routers.FederationExtension(mock.ANY)
mock_deprecator.assert_called_once_with(mock.ANY, mock.ANY)
args, _kwargs = mock_deprecator.call_args
self.assertIn("Remove federation_extension from", args[1])
class FederatedSetupMixin(object):
ACTION = 'authenticate'
IDP = 'ORG_IDP'
PROTOCOL = 'saml2'
AUTH_METHOD = 'saml2'
USER = 'user@ORGANIZATION'
ASSERTION_PREFIX = 'PREFIX_'
IDP_WITH_REMOTE = 'ORG_IDP_REMOTE'
REMOTE_IDS = ['entityID_IDP1', 'entityID_IDP2']
REMOTE_ID_ATTR = uuid.uuid4().hex
UNSCOPED_V3_SAML2_REQ = {
"identity": {
"methods": [AUTH_METHOD],
AUTH_METHOD: {
"identity_provider": IDP,
"protocol": PROTOCOL
}
}
}
def _check_domains_are_valid(self, token):
self.assertEqual('Federated', token['user']['domain']['id'])
self.assertEqual('Federated', token['user']['domain']['name'])
def _project(self, project):
return (project['id'], project['name'])
def _roles(self, roles):
return set([(r['id'], r['name']) for r in roles])
def _check_projects_and_roles(self, token, roles, projects):
"""Check whether the projects and the roles match."""
token_roles = token.get('roles')
if token_roles is None:
raise AssertionError('Roles not found in the token')
token_roles = self._roles(token_roles)
roles_ref = self._roles(roles)
self.assertEqual(token_roles, roles_ref)
token_projects = token.get('project')
if token_projects is None:
raise AssertionError('Projects not found in the token')
token_projects = self._project(token_projects)
projects_ref = self._project(projects)
self.assertEqual(token_projects, projects_ref)
def _check_scoped_token_attributes(self, token):
for obj in ('user', 'catalog', 'expires_at', 'issued_at',
'methods', 'roles'):
self.assertIn(obj, token)
os_federation = token['user']['OS-FEDERATION']
self.assertIn('groups', os_federation)
self.assertIn('identity_provider', os_federation)
self.assertIn('protocol', os_federation)
self.assertThat(os_federation, matchers.HasLength(3))
self.assertEqual(self.IDP, os_federation['identity_provider']['id'])
self.assertEqual(self.PROTOCOL, os_federation['protocol']['id'])
def _check_project_scoped_token_attributes(self, token, project_id):
self.assertEqual(project_id, token['project']['id'])
self._check_scoped_token_attributes(token)
def _check_domain_scoped_token_attributes(self, token, domain_id):
self.assertEqual(domain_id, token['domain']['id'])
self._check_scoped_token_attributes(token)
def assertValidMappedUser(self, token):
"""Check if user object meets all the criteria."""
user = token['user']
self.assertIn('id', user)
self.assertIn('name', user)
self.assertIn('domain', user)
self.assertIn('groups', user['OS-FEDERATION'])
self.assertIn('identity_provider', user['OS-FEDERATION'])
self.assertIn('protocol', user['OS-FEDERATION'])
# Make sure user_id is url safe
self.assertEqual(urllib.parse.quote(user['name']), user['id'])
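# NOTE: the url-safety check above relies on urllib.parse.quote; as an
# illustration (not taken from the fixtures), a federated name such as
# 'user@ORGANIZATION' would be percent-encoded to 'user%40ORGANIZATION',
# which is the form the ephemeral user id is expected to take.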
def _issue_unscoped_token(self,
idp=None,
assertion='EMPLOYEE_ASSERTION',
environment=None):
api = federation_controllers.Auth()
context = {'environment': environment or {}}
self._inject_assertion(context, assertion)
if idp is None:
idp = self.IDP
r = api.federated_authentication(context, idp, self.PROTOCOL)
return r
def idp_ref(self, id=None):
idp = {
'id': id or uuid.uuid4().hex,
'enabled': True,
'description': uuid.uuid4().hex
}
return idp
def proto_ref(self, mapping_id=None):
proto = {
'id': uuid.uuid4().hex,
'mapping_id': mapping_id or uuid.uuid4().hex
}
return proto
def mapping_ref(self, rules=None):
return {
'id': uuid.uuid4().hex,
'rules': rules or self.rules['rules']
}
def _scope_request(self, unscoped_token_id, scope, scope_id):
return {
'auth': {
'identity': {
'methods': [
self.AUTH_METHOD
],
self.AUTH_METHOD: {
'id': unscoped_token_id
}
},
'scope': {
scope: {
'id': scope_id
}
}
}
}
def _inject_assertion(self, context, variant, query_string=None):
assertion = getattr(mapping_fixtures, variant)
context['environment'].update(assertion)
context['query_string'] = query_string or []
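# NOTE: the assertion variants injected above (EMPLOYEE_ASSERTION and
# friends) come from keystone.tests.unit.mapping_fixtures; merging them into
# context['environment'] simulates the attributes a SAML-aware web server
# module would place in the WSGI environ. As a rough, assumed illustration,
# that environment ends up holding keys such as 'UserName', 'Email' and
# 'orgPersonType', which are the same remote attribute types referenced by
# the mapping rules built in load_federation_sample_data below.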
def load_federation_sample_data(self):
"""Inject additional data."""
# Create and add domains
self.domainA = unit.new_domain_ref()
self.resource_api.create_domain(self.domainA['id'],
self.domainA)
self.domainB = unit.new_domain_ref()
self.resource_api.create_domain(self.domainB['id'],
self.domainB)
self.domainC = unit.new_domain_ref()
self.resource_api.create_domain(self.domainC['id'],
self.domainC)
self.domainD = unit.new_domain_ref()
self.resource_api.create_domain(self.domainD['id'],
self.domainD)
# Create and add projects
self.proj_employees = unit.new_project_ref(
domain_id=self.domainA['id'])
self.resource_api.create_project(self.proj_employees['id'],
self.proj_employees)
self.proj_customers = unit.new_project_ref(
domain_id=self.domainA['id'])
self.resource_api.create_project(self.proj_customers['id'],
self.proj_customers)
self.project_all = unit.new_project_ref(
domain_id=self.domainA['id'])
self.resource_api.create_project(self.project_all['id'],
self.project_all)
self.project_inherited = unit.new_project_ref(
domain_id=self.domainD['id'])
self.resource_api.create_project(self.project_inherited['id'],
self.project_inherited)
# Create and add groups
self.group_employees = unit.new_group_ref(domain_id=self.domainA['id'])
self.group_employees = (
self.identity_api.create_group(self.group_employees))
self.group_customers = unit.new_group_ref(domain_id=self.domainA['id'])
self.group_customers = (
self.identity_api.create_group(self.group_customers))
self.group_admins = unit.new_group_ref(domain_id=self.domainA['id'])
self.group_admins = self.identity_api.create_group(self.group_admins)
# Create and add roles
self.role_employee = unit.new_role_ref()
self.role_api.create_role(self.role_employee['id'], self.role_employee)
self.role_customer = unit.new_role_ref()
self.role_api.create_role(self.role_customer['id'], self.role_customer)
self.role_admin = unit.new_role_ref()
self.role_api.create_role(self.role_admin['id'], self.role_admin)
# Employees can access
# * proj_employees
# * project_all
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
project_id=self.proj_employees['id'])
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
project_id=self.project_all['id'])
# Customers can access
# * proj_customers
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
project_id=self.proj_customers['id'])
# Admins can access:
# * proj_customers
# * proj_employees
# * project_all
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.proj_customers['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.proj_employees['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.project_all['id'])
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
domain_id=self.domainA['id'])
# Customers can access:
# * domain A
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
domain_id=self.domainA['id'])
# Customers can access projects via inheritance:
# * domain D
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
domain_id=self.domainD['id'],
inherited_to_projects=True)
# Employees can access:
# * domain A
# * domain B
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
domain_id=self.domainB['id'])
# Admins can access:
# * domain A
# * domain B
# * domain C
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainB['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainC['id'])
self.rules = {
'rules': [
{
'local': [
{
'group': {
'id': self.group_employees['id']
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'Email',
},
{
'type': 'orgPersonType',
'any_one_of': [
'Employee'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_employees['id']
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': self.ASSERTION_PREFIX + 'UserName'
},
{
'type': self.ASSERTION_PREFIX + 'Email',
},
{
'type': self.ASSERTION_PREFIX + 'orgPersonType',
'any_one_of': [
'SuperEmployee'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'Email'
},
{
'type': 'orgPersonType',
'any_one_of': [
'Customer'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_admins['id']
}
},
{
'group': {
'id': self.group_employees['id']
}
},
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'Email'
},
{
'type': 'orgPersonType',
'any_one_of': [
'Admin',
'Chief'
]
}
]
},
{
'local': [
{
'group': {
'id': uuid.uuid4().hex
}
},
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': 'UserName',
},
{
'type': 'Email',
},
{
'type': 'FirstName',
'any_one_of': [
'Jill'
]
},
{
'type': 'LastName',
'any_one_of': [
'Smith'
]
}
]
},
{
'local': [
{
'group': {
'id': 'this_group_no_longer_exists'
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': 'UserName',
},
{
'type': 'Email',
},
{
'type': 'Email',
'any_one_of': [
'[email protected]'
]
},
{
'type': 'orgPersonType',
'any_one_of': [
'Tester'
]
}
]
},
# rules with local group names
{
"local": [
{
'user': {
'name': '{0}',
'id': '{1}'
}
},
{
"group": {
"name": self.group_customers['name'],
"domain": {
"name": self.domainA['name']
}
}
}
],
"remote": [
{
'type': 'UserName',
},
{
'type': 'Email',
},
{
"type": "orgPersonType",
"any_one_of": [
"CEO",
"CTO"
],
}
]
},
{
"local": [
{
'user': {
'name': '{0}',
'id': '{1}'
}
},
{
"group": {
"name": self.group_admins['name'],
"domain": {
"id": self.domainA['id']
}
}
}
],
"remote": [
{
"type": "UserName",
},
{
"type": "Email",
},
{
"type": "orgPersonType",
"any_one_of": [
"Managers"
]
}
]
},
{
"local": [
{
"user": {
"name": "{0}",
"id": "{1}"
}
},
{
"group": {
"name": "NON_EXISTING",
"domain": {
"id": self.domainA['id']
}
}
}
],
"remote": [
{
"type": "UserName",
},
{
"type": "Email",
},
{
"type": "UserName",
"any_one_of": [
"IamTester"
]
}
]
},
{
"local": [
{
"user": {
"type": "local",
"name": self.user['name'],
"domain": {
"id": self.user['domain_id']
}
}
},
{
"group": {
"id": self.group_customers['id']
}
}
],
"remote": [
{
"type": "UserType",
"any_one_of": [
"random"
]
}
]
},
{
"local": [
{
"user": {
"type": "local",
"name": self.user['name'],
"domain": {
"id": uuid.uuid4().hex
}
}
}
],
"remote": [
{
"type": "Position",
"any_one_of": [
"DirectorGeneral"
]
}
]
}
]
}
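# NOTE: a hedged reading guide for the rules above: each rule pairs a list
# of 'remote' matchers with a list of 'local' objects. Remote entries that
# carry 'any_one_of' act as filters (e.g. orgPersonType must equal
# 'Employee'), while the plain entries such as {'type': 'UserName'} and
# {'type': 'Email'} appear to supply, in order, the positional values
# substituted into local templates like {'name': '{0}', 'id': '{1}'}.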
# Add IDP
self.idp = self.idp_ref(id=self.IDP)
self.federation_api.create_idp(self.idp['id'],
self.idp)
# Add IDP with remote
self.idp_with_remote = self.idp_ref(id=self.IDP_WITH_REMOTE)
self.idp_with_remote['remote_ids'] = self.REMOTE_IDS
self.federation_api.create_idp(self.idp_with_remote['id'],
self.idp_with_remote)
# Add a mapping
self.mapping = self.mapping_ref()
self.federation_api.create_mapping(self.mapping['id'],
self.mapping)
# Add protocols
self.proto_saml = self.proto_ref(mapping_id=self.mapping['id'])
self.proto_saml['id'] = self.PROTOCOL
self.federation_api.create_protocol(self.idp['id'],
self.proto_saml['id'],
self.proto_saml)
# Add protocols IDP with remote
self.federation_api.create_protocol(self.idp_with_remote['id'],
self.proto_saml['id'],
self.proto_saml)
# Generate fake tokens
context = {'environment': {}}
self.tokens = {}
VARIANTS = ('EMPLOYEE_ASSERTION', 'CUSTOMER_ASSERTION',
'ADMIN_ASSERTION')
api = auth_controllers.Auth()
for variant in VARIANTS:
self._inject_assertion(context, variant)
r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
self.tokens[variant] = r.headers.get('X-Subject-Token')
self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN = self._scope_request(
uuid.uuid4().hex, 'project', self.proj_customers['id'])
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE = self._scope_request(
self.tokens['EMPLOYEE_ASSERTION'], 'project',
self.proj_employees['id'])
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'project',
self.proj_employees['id'])
self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'project',
self.proj_customers['id'])
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'project',
self.proj_employees['id'])
self.TOKEN_SCOPE_PROJECT_INHERITED_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'project',
self.project_inherited['id'])
self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainA['id'])
self.TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'domain',
self.domainB['id'])
self.TOKEN_SCOPE_DOMAIN_D_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainD['id'])
self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'domain', self.domainA['id'])
self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'domain', self.domainB['id'])
self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'domain',
self.domainC['id'])
class FederatedIdentityProviderTests(test_v3.RestfulTestCase):
"""A test class for Identity Providers."""
idp_keys = ['description', 'enabled']
default_body = {'description': None, 'enabled': True}
def base_url(self, suffix=None):
if suffix is not None:
return '/OS-FEDERATION/identity_providers/' + str(suffix)
return '/OS-FEDERATION/identity_providers'
def _fetch_attribute_from_response(self, resp, parameter,
assert_is_not_none=True):
"""Fetch single attribute from TestResponse object."""
result = resp.result.get(parameter)
if assert_is_not_none:
self.assertIsNotNone(result)
return result
def _create_and_decapsulate_response(self, body=None):
"""Create IdP and fetch it's random id along with entity."""
default_resp = self._create_default_idp(body=body)
idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
self.assertIsNotNone(idp)
idp_id = idp.get('id')
return (idp_id, idp)
def _get_idp(self, idp_id):
"""Fetch IdP entity based on its id."""
url = self.base_url(suffix=idp_id)
resp = self.get(url)
return resp
def _create_default_idp(self, body=None):
"""Create default IdP."""
url = self.base_url(suffix=uuid.uuid4().hex)
if body is None:
body = self._http_idp_input()
resp = self.put(url, body={'identity_provider': body},
expected_status=http_client.CREATED)
return resp
def _http_idp_input(self, **kwargs):
"""Create default input for IdP data."""
body = None
if 'body' not in kwargs:
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
else:
body = kwargs['body']
return body
def _assign_protocol_to_idp(self, idp_id=None, proto=None, url=None,
mapping_id=None, validate=True, **kwargs):
if url is None:
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
if idp_id is None:
idp_id, _ = self._create_and_decapsulate_response()
if proto is None:
proto = uuid.uuid4().hex
if mapping_id is None:
mapping_id = uuid.uuid4().hex
body = {'mapping_id': mapping_id}
url = url % {'idp_id': idp_id, 'protocol_id': proto}
resp = self.put(url, body={'protocol': body}, **kwargs)
if validate:
self.assertValidResponse(resp, 'protocol', dummy_validator,
keys_to_check=['id', 'mapping_id'],
ref={'id': proto,
'mapping_id': mapping_id})
return (resp, idp_id, proto)
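# NOTE: for orientation, the helper above issues roughly the following
# request (placeholder values shown purely as an illustration):
#   PUT /OS-FEDERATION/identity_providers/<idp_id>/protocols/<protocol_id>
#   body: {"protocol": {"mapping_id": "<mapping_id>"}}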
def _get_protocol(self, idp_id, protocol_id):
url = "%s/protocols/%s" % (idp_id, protocol_id)
url = self.base_url(suffix=url)
r = self.get(url)
return r
def test_create_idp(self):
"""Creates the IdentityProvider entity associated to remote_ids."""
keys_to_check = list(self.idp_keys)
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
def test_create_idp_remote(self):
"""Creates the IdentityProvider entity associated to remote_ids."""
keys_to_check = list(self.idp_keys)
keys_to_check.append('remote_ids')
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
body['remote_ids'] = [uuid.uuid4().hex,
uuid.uuid4().hex,
uuid.uuid4().hex]
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
def test_create_idp_remote_repeated(self):
"""Creates two IdentityProvider entities with some remote_ids
A remote_id is the same for both so the second IdP is not
created because of the uniqueness of the remote_ids
Expect HTTP 409 Conflict code for the latter call.
"""
body = self.default_body.copy()
repeated_remote_id = uuid.uuid4().hex
body['remote_ids'] = [uuid.uuid4().hex,
uuid.uuid4().hex,
uuid.uuid4().hex,
repeated_remote_id]
self._create_default_idp(body=body)
url = self.base_url(suffix=uuid.uuid4().hex)
body['remote_ids'] = [uuid.uuid4().hex,
repeated_remote_id]
resp = self.put(url, body={'identity_provider': body},
expected_status=http_client.CONFLICT)
resp_data = jsonutils.loads(resp.body)
self.assertIn('Duplicate remote ID',
resp_data.get('error', {}).get('message'))
def test_create_idp_remote_empty(self):
"""Creates an IdP with empty remote_ids."""
keys_to_check = list(self.idp_keys)
keys_to_check.append('remote_ids')
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
body['remote_ids'] = []
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
def test_create_idp_remote_none(self):
"""Creates an IdP with a None remote_ids."""
keys_to_check = list(self.idp_keys)
keys_to_check.append('remote_ids')
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
body['remote_ids'] = None
resp = self._create_default_idp(body=body)
expected = body.copy()
expected['remote_ids'] = []
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=expected)
def test_update_idp_remote_ids(self):
"""Update IdP's remote_ids parameter."""
body = self.default_body.copy()
body['remote_ids'] = [uuid.uuid4().hex]
default_resp = self._create_default_idp(body=body)
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
self.assertIsNotNone(idp_id)
body['remote_ids'] = [uuid.uuid4().hex, uuid.uuid4().hex]
body = {'identity_provider': body}
resp = self.patch(url, body=body)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
body = body['identity_provider']
self.assertEqual(sorted(body['remote_ids']),
sorted(updated_idp.get('remote_ids')))
resp = self.get(url)
returned_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
self.assertEqual(sorted(body['remote_ids']),
sorted(returned_idp.get('remote_ids')))
def test_update_idp_clean_remote_ids(self):
"""Update IdP's remote_ids parameter with an empty list."""
body = self.default_body.copy()
body['remote_ids'] = [uuid.uuid4().hex]
default_resp = self._create_default_idp(body=body)
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
self.assertIsNotNone(idp_id)
body['remote_ids'] = []
body = {'identity_provider': body}
resp = self.patch(url, body=body)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
body = body['identity_provider']
self.assertEqual(sorted(body['remote_ids']),
sorted(updated_idp.get('remote_ids')))
resp = self.get(url)
returned_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
self.assertEqual(sorted(body['remote_ids']),
sorted(returned_idp.get('remote_ids')))
def test_list_idps(self, iterations=5):
"""Lists all available IdentityProviders.
This test collects ids of created IdPs and
intersects it with the list of all available IdPs.
List of all IdPs can be a superset of IdPs created in this test,
because other tests also create IdPs.
"""
def get_id(resp):
r = self._fetch_attribute_from_response(resp,
'identity_provider')
return r.get('id')
ids = []
for _ in range(iterations):
id = get_id(self._create_default_idp())
ids.append(id)
ids = set(ids)
keys_to_check = self.idp_keys
url = self.base_url()
resp = self.get(url)
self.assertValidListResponse(resp, 'identity_providers',
dummy_validator,
keys_to_check=keys_to_check)
entities = self._fetch_attribute_from_response(resp,
'identity_providers')
entities_ids = set([e['id'] for e in entities])
ids_intersection = entities_ids.intersection(ids)
self.assertEqual(ids_intersection, ids)
def test_filter_list_idp_by_id(self):
def get_id(resp):
r = self._fetch_attribute_from_response(resp,
'identity_provider')
return r.get('id')
idp1_id = get_id(self._create_default_idp())
idp2_id = get_id(self._create_default_idp())
# list the IdP, should get two IdP.
url = self.base_url()
resp = self.get(url)
entities = self._fetch_attribute_from_response(resp,
'identity_providers')
entities_ids = [e['id'] for e in entities]
self.assertItemsEqual(entities_ids, [idp1_id, idp2_id])
# filter the IdP by ID.
url = self.base_url() + '?id=' + idp1_id
resp = self.get(url)
filtered_service_list = resp.json['identity_providers']
self.assertThat(filtered_service_list, matchers.HasLength(1))
self.assertEqual(idp1_id, filtered_service_list[0].get('id'))
def test_filter_list_idp_by_enabled(self):
def get_id(resp):
r = self._fetch_attribute_from_response(resp,
'identity_provider')
return r.get('id')
idp1_id = get_id(self._create_default_idp())
body = self.default_body.copy()
body['enabled'] = False
idp2_id = get_id(self._create_default_idp(body=body))
# list the IdP, should get two IdP.
url = self.base_url()
resp = self.get(url)
entities = self._fetch_attribute_from_response(resp,
'identity_providers')
entities_ids = [e['id'] for e in entities]
self.assertItemsEqual(entities_ids, [idp1_id, idp2_id])
# filter the IdP by 'enabled'.
url = self.base_url() + '?enabled=True'
resp = self.get(url)
filtered_service_list = resp.json['identity_providers']
self.assertThat(filtered_service_list, matchers.HasLength(1))
self.assertEqual(idp1_id, filtered_service_list[0].get('id'))
def test_check_idp_uniqueness(self):
"""Add same IdP twice.
Expect HTTP 409 Conflict code for the latter call.
"""
url = self.base_url(suffix=uuid.uuid4().hex)
body = self._http_idp_input()
self.put(url, body={'identity_provider': body},
expected_status=http_client.CREATED)
resp = self.put(url, body={'identity_provider': body},
expected_status=http_client.CONFLICT)
resp_data = jsonutils.loads(resp.body)
self.assertIn('Duplicate entry',
resp_data.get('error', {}).get('message'))
def test_get_idp(self):
"""Create and later fetch IdP."""
body = self._http_idp_input()
default_resp = self._create_default_idp(body=body)
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
resp = self.get(url)
self.assertValidResponse(resp, 'identity_provider',
dummy_validator, keys_to_check=body.keys(),
ref=body)
def test_get_nonexisting_idp(self):
"""Fetch nonexisting IdP entity.
Expected HTTP 404 Not Found status code.
"""
idp_id = uuid.uuid4().hex
self.assertIsNotNone(idp_id)
url = self.base_url(suffix=idp_id)
self.get(url, expected_status=http_client.NOT_FOUND)
def test_delete_existing_idp(self):
"""Create and later delete IdP.
Expect HTTP 404 Not Found for the GET IdP call.
"""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
self.assertIsNotNone(idp_id)
url = self.base_url(suffix=idp_id)
self.delete(url)
self.get(url, expected_status=http_client.NOT_FOUND)
def test_delete_idp_also_deletes_assigned_protocols(self):
"""Deleting an IdP will delete its assigned protocol."""
# create default IdP
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp['id']
protocol_id = uuid.uuid4().hex
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
idp_url = self.base_url(suffix=idp_id)
# assign protocol to IdP
kwargs = {'expected_status': http_client.CREATED}
resp, idp_id, proto = self._assign_protocol_to_idp(
url=url,
idp_id=idp_id,
proto=protocol_id,
**kwargs)
# removing IdP will remove the assigned protocol as well
self.assertEqual(1, len(self.federation_api.list_protocols(idp_id)))
self.delete(idp_url)
self.get(idp_url, expected_status=http_client.NOT_FOUND)
self.assertEqual(0, len(self.federation_api.list_protocols(idp_id)))
def test_delete_nonexisting_idp(self):
"""Delete nonexisting IdP.
Expect HTTP 404 Not Found for the DELETE IdP call.
"""
idp_id = uuid.uuid4().hex
url = self.base_url(suffix=idp_id)
self.delete(url, expected_status=http_client.NOT_FOUND)
def test_update_idp_mutable_attributes(self):
"""Update IdP's mutable parameters."""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
self.assertIsNotNone(idp_id)
_enabled = not default_idp.get('enabled')
body = {'remote_ids': [uuid.uuid4().hex, uuid.uuid4().hex],
'description': uuid.uuid4().hex,
'enabled': _enabled}
body = {'identity_provider': body}
resp = self.patch(url, body=body)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
body = body['identity_provider']
for key in body.keys():
if isinstance(body[key], list):
self.assertEqual(sorted(body[key]),
sorted(updated_idp.get(key)))
else:
self.assertEqual(body[key], updated_idp.get(key))
resp = self.get(url)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
for key in body.keys():
if isinstance(body[key], list):
self.assertEqual(sorted(body[key]),
sorted(updated_idp.get(key)))
else:
self.assertEqual(body[key], updated_idp.get(key))
def test_update_idp_immutable_attributes(self):
"""Update IdP's immutable parameters.
Expect HTTP BAD REQUEST.
"""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
self.assertIsNotNone(idp_id)
body = self._http_idp_input()
body['id'] = uuid.uuid4().hex
body['protocols'] = [uuid.uuid4().hex, uuid.uuid4().hex]
url = self.base_url(suffix=idp_id)
self.patch(url, body={'identity_provider': body},
expected_status=http_client.BAD_REQUEST)
def test_update_nonexistent_idp(self):
"""Update nonexistent IdP
Expect HTTP 404 Not Found code.
"""
idp_id = uuid.uuid4().hex
url = self.base_url(suffix=idp_id)
body = self._http_idp_input()
body['enabled'] = False
body = {'identity_provider': body}
self.patch(url, body=body, expected_status=http_client.NOT_FOUND)
def test_assign_protocol_to_idp(self):
"""Assign a protocol to existing IdP."""
self._assign_protocol_to_idp(expected_status=http_client.CREATED)
def test_protocol_composite_pk(self):
"""Test that Keystone can add two entities.
The entities have identical names but are attached to different
IdPs.
1. Add IdP and assign it protocol with predefined name
2. Add another IdP and assign it a protocol with same name.
Expect HTTP 201 code
"""
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
kwargs = {'expected_status': http_client.CREATED}
self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
def test_protocol_idp_pk_uniqueness(self):
"""Test whether Keystone checks for unique idp/protocol values.
Add the same protocol twice, expect Keystone to reject the latter call and
return HTTP 409 Conflict code.
"""
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
kwargs = {'expected_status': http_client.CREATED}
resp, idp_id, proto = self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
kwargs = {'expected_status': http_client.CONFLICT}
resp, idp_id, proto = self._assign_protocol_to_idp(idp_id=idp_id,
proto='saml2',
validate=False,
url=url, **kwargs)
def test_assign_protocol_to_nonexistent_idp(self):
"""Assign protocol to IdP that doesn't exist.
Expect HTTP 404 Not Found code.
"""
idp_id = uuid.uuid4().hex
kwargs = {'expected_status': http_client.NOT_FOUND}
self._assign_protocol_to_idp(proto='saml2',
idp_id=idp_id,
validate=False,
**kwargs)
def test_get_protocol(self):
"""Create and later fetch protocol tied to IdP."""
resp, idp_id, proto = self._assign_protocol_to_idp(
expected_status=http_client.CREATED)
proto_id = self._fetch_attribute_from_response(resp, 'protocol')['id']
url = "%s/protocols/%s" % (idp_id, proto_id)
url = self.base_url(suffix=url)
resp = self.get(url)
reference = {'id': proto_id}
self.assertValidResponse(resp, 'protocol',
dummy_validator,
keys_to_check=reference.keys(),
ref=reference)
def test_list_protocols(self):
"""Create set of protocols and later list them.
Compare input and output id sets.
"""
resp, idp_id, proto = self._assign_protocol_to_idp(
expected_status=http_client.CREATED)
iterations = random.randint(0, 16)
protocol_ids = []
for _ in range(iterations):
resp, _, proto = self._assign_protocol_to_idp(
idp_id=idp_id,
expected_status=http_client.CREATED)
proto_id = self._fetch_attribute_from_response(resp, 'protocol')
proto_id = proto_id['id']
protocol_ids.append(proto_id)
url = "%s/protocols" % idp_id
url = self.base_url(suffix=url)
resp = self.get(url)
self.assertValidListResponse(resp, 'protocols',
dummy_validator,
keys_to_check=['id'])
entities = self._fetch_attribute_from_response(resp, 'protocols')
entities = set([entity['id'] for entity in entities])
protocols_intersection = entities.intersection(protocol_ids)
self.assertEqual(protocols_intersection, set(protocol_ids))
def test_update_protocols_attribute(self):
"""Update protocol's attribute."""
resp, idp_id, proto = self._assign_protocol_to_idp(
expected_status=http_client.CREATED)
new_mapping_id = uuid.uuid4().hex
url = "%s/protocols/%s" % (idp_id, proto)
url = self.base_url(suffix=url)
body = {'mapping_id': new_mapping_id}
resp = self.patch(url, body={'protocol': body})
self.assertValidResponse(resp, 'protocol', dummy_validator,
keys_to_check=['id', 'mapping_id'],
ref={'id': proto,
'mapping_id': new_mapping_id}
)
def test_delete_protocol(self):
"""Delete protocol.
Expect HTTP 404 Not Found code for the GET call after the protocol is
deleted.
"""
url = self.base_url(suffix='/%(idp_id)s/'
'protocols/%(protocol_id)s')
resp, idp_id, proto = self._assign_protocol_to_idp(
expected_status=http_client.CREATED)
url = url % {'idp_id': idp_id,
'protocol_id': proto}
self.delete(url)
self.get(url, expected_status=http_client.NOT_FOUND)
class MappingCRUDTests(test_v3.RestfulTestCase):
"""A class for testing CRUD operations for Mappings."""
MAPPING_URL = '/OS-FEDERATION/mappings/'
def assertValidMappingListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'mappings',
self.assertValidMapping,
keys_to_check=[],
*args,
**kwargs)
def assertValidMappingResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'mapping',
self.assertValidMapping,
keys_to_check=[],
*args,
**kwargs)
def assertValidMapping(self, entity, ref=None):
self.assertIsNotNone(entity.get('id'))
self.assertIsNotNone(entity.get('rules'))
if ref:
self.assertEqual(entity['rules'], ref['rules'])
return entity
def _create_default_mapping_entry(self):
url = self.MAPPING_URL + uuid.uuid4().hex
resp = self.put(url,
body={'mapping': mapping_fixtures.MAPPING_LARGE},
expected_status=http_client.CREATED)
return resp
def _get_id_from_response(self, resp):
r = resp.result.get('mapping')
return r.get('id')
def test_mapping_create(self):
resp = self._create_default_mapping_entry()
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
def test_mapping_list(self):
url = self.MAPPING_URL
self._create_default_mapping_entry()
resp = self.get(url)
entities = resp.result.get('mappings')
self.assertIsNotNone(entities)
self.assertResponseStatus(resp, http_client.OK)
self.assertValidListLinks(resp.result.get('links'))
self.assertEqual(1, len(entities))
def test_mapping_delete(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': str(mapping_id)}
resp = self.delete(url)
self.assertResponseStatus(resp, http_client.NO_CONTENT)
self.get(url, expected_status=http_client.NOT_FOUND)
def test_mapping_get(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': mapping_id}
resp = self.get(url)
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
def test_mapping_update(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': mapping_id}
resp = self.patch(url,
body={'mapping': mapping_fixtures.MAPPING_SMALL})
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
resp = self.get(url)
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
def test_delete_mapping_dne(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.delete(url, expected_status=http_client.NOT_FOUND)
def test_get_mapping_dne(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.get(url, expected_status=http_client.NOT_FOUND)
def test_create_mapping_bad_requirements(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_BAD_REQ})
def test_create_mapping_no_rules(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_NO_RULES})
def test_create_mapping_no_remote_objects(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_NO_REMOTE})
def test_create_mapping_bad_value(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_BAD_VALUE})
def test_create_mapping_missing_local(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_MISSING_LOCAL})
def test_create_mapping_missing_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_MISSING_TYPE})
def test_create_mapping_wrong_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_WRONG_TYPE})
def test_create_mapping_extra_remote_properties_not_any_of(self):
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_NOT_ANY_OF
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping})
def test_create_mapping_extra_remote_properties_any_one_of(self):
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_ANY_ONE_OF
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping})
def test_create_mapping_extra_remote_properties_just_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_JUST_TYPE
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping})
def test_create_mapping_empty_map(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': {}})
def test_create_mapping_extra_rules_properties(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_EXTRA_RULES_PROPS})
def test_create_mapping_with_blacklist_and_whitelist(self):
"""Test for adding whitelist and blacklist in the rule
Server should respond with HTTP 400 Bad Request error upon discovering
both ``whitelist`` and ``blacklist`` keywords in the same rule.
"""
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_AND_BLACKLIST
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping})
def test_create_mapping_with_local_user_and_local_domain(self):
url = self.MAPPING_URL + uuid.uuid4().hex
resp = self.put(
url,
body={
'mapping': mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN
},
expected_status=http_client.CREATED)
self.assertValidMappingResponse(
resp, mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN)
def test_create_mapping_with_ephemeral(self):
url = self.MAPPING_URL + uuid.uuid4().hex
resp = self.put(
url,
body={'mapping': mapping_fixtures.MAPPING_EPHEMERAL_USER},
expected_status=http_client.CREATED)
self.assertValidMappingResponse(
resp, mapping_fixtures.MAPPING_EPHEMERAL_USER)
def test_create_mapping_with_bad_user_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
# get a copy of a known good map
bad_mapping = copy.deepcopy(mapping_fixtures.MAPPING_EPHEMERAL_USER)
# now sabotage the user type
bad_mapping['rules'][0]['local'][0]['user']['type'] = uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': bad_mapping})
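# NOTE: taken together, the CRUD tests above exercise the mapping API with
# requests of roughly this shape (the concrete rule payloads live in
# keystone.tests.unit.mapping_fixtures; the values here are placeholders):
#   PUT /OS-FEDERATION/mappings/<mapping_id>
#   body: {"mapping": {"rules": [{"local": [...], "remote": [...]}]}}
#   GET / PATCH / DELETE /OS-FEDERATION/mappings/<mapping_id>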
class FederatedTokenTests(test_v3.RestfulTestCase, FederatedSetupMixin):
def auth_plugin_config_override(self):
methods = ['saml2']
super(FederatedTokenTests, self).auth_plugin_config_override(methods)
def setUp(self):
super(FederatedTokenTests, self).setUp()
self._notifications = []
def fake_saml_notify(action, context, user_id, group_ids,
identity_provider, protocol, token_id, outcome):
note = {
'action': action,
'user_id': user_id,
'identity_provider': identity_provider,
'protocol': protocol,
'send_notification_called': True}
self._notifications.append(note)
self.useFixture(mockpatch.PatchObject(
notifications,
'send_saml_audit_notification',
fake_saml_notify))
def _assert_last_notify(self, action, identity_provider, protocol,
user_id=None):
self.assertTrue(self._notifications)
note = self._notifications[-1]
if user_id:
self.assertEqual(note['user_id'], user_id)
self.assertEqual(note['action'], action)
self.assertEqual(note['identity_provider'], identity_provider)
self.assertEqual(note['protocol'], protocol)
self.assertTrue(note['send_notification_called'])
def load_fixtures(self, fixtures):
super(FederatedTokenTests, self).load_fixtures(fixtures)
self.load_federation_sample_data()
def test_issue_unscoped_token_notify(self):
self._issue_unscoped_token()
self._assert_last_notify(self.ACTION, self.IDP, self.PROTOCOL)
def test_issue_unscoped_token(self):
r = self._issue_unscoped_token()
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
self.assertValidMappedUser(r.json['token'])
def test_issue_unscoped_token_disabled_idp(self):
"""Checks if authentication works with disabled identity providers.
Test plan:
1) Disable default IdP
2) Try issuing unscoped token for that IdP
3) Expect server to forbid authentication
"""
enabled_false = {'enabled': False}
self.federation_api.update_idp(self.IDP, enabled_false)
self.assertRaises(exception.Forbidden,
self._issue_unscoped_token)
def test_issue_unscoped_token_group_names_in_mapping(self):
r = self._issue_unscoped_token(assertion='ANOTHER_CUSTOMER_ASSERTION')
ref_groups = set([self.group_customers['id'], self.group_admins['id']])
token_resp = r.json_body
token_groups = token_resp['token']['user']['OS-FEDERATION']['groups']
token_groups = set([group['id'] for group in token_groups])
self.assertEqual(ref_groups, token_groups)
def test_issue_unscoped_tokens_nonexisting_group(self):
self.assertRaises(exception.MissingGroups,
self._issue_unscoped_token,
assertion='ANOTHER_TESTER_ASSERTION')
def test_issue_unscoped_token_with_remote_no_attribute(self):
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_with_remote(self):
self.config_fixture.config(group='federation',
remote_id_attribute=self.REMOTE_ID_ATTR)
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_with_saml2_remote(self):
self.config_fixture.config(group='saml2',
remote_id_attribute=self.REMOTE_ID_ATTR)
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_with_remote_different(self):
self.config_fixture.config(group='federation',
remote_id_attribute=self.REMOTE_ID_ATTR)
self.assertRaises(exception.Forbidden,
self._issue_unscoped_token,
idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR: uuid.uuid4().hex
})
def test_issue_unscoped_token_with_remote_default_overwritten(self):
"""Test that protocol remote_id_attribute has higher priority.
Make sure the parameter stored under ``protocol`` section has higher
priority over parameter from default ``federation`` configuration
section.
"""
self.config_fixture.config(group='saml2',
remote_id_attribute=self.REMOTE_ID_ATTR)
self.config_fixture.config(group='federation',
remote_id_attribute=uuid.uuid4().hex)
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_with_remote_unavailable(self):
self.config_fixture.config(group='federation',
remote_id_attribute=self.REMOTE_ID_ATTR)
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token,
idp=self.IDP_WITH_REMOTE,
environment={
uuid.uuid4().hex: uuid.uuid4().hex
})
def test_issue_unscoped_token_with_remote_user_as_empty_string(self):
# make sure that REMOTE_USER set as the empty string won't interfere
r = self._issue_unscoped_token(environment={'REMOTE_USER': ''})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_no_groups(self):
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token,
assertion='BAD_TESTER_ASSERTION')
def test_issue_unscoped_token_malformed_environment(self):
"""Test whether non string objects are filtered out.
Put non string objects into the environment, inject
correct assertion and try to get an unscoped token.
Expect server not to fail on using split() method on
non string objects and return token id in the HTTP header.
"""
api = auth_controllers.Auth()
context = {
'environment': {
'malformed_object': object(),
'another_bad_idea': tuple(range(10)),
'yet_another_bad_param': dict(zip(uuid.uuid4().hex,
range(32)))
}
}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_scope_to_project_once_notify(self):
r = self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
user_id = r.json['token']['user']['id']
self._assert_last_notify(self.ACTION, self.IDP, self.PROTOCOL, user_id)
def test_scope_to_project_once(self):
r = self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self._check_project_scoped_token_attributes(token_resp, project_id)
roles_ref = [self.role_employee]
projects_ref = self.proj_employees
self._check_projects_and_roles(token_resp, roles_ref, projects_ref)
self.assertValidMappedUser(token_resp)
def test_scope_token_with_idp_disabled(self):
"""Scope token issued by disabled IdP.
Try scoping the token issued by an IdP which is disabled now. Expect
server to refuse scoping operation.
This test confirms correct behaviour when IdP was enabled and unscoped
token was issued, but disabled before user tries to scope the token.
Here we assume the unscoped token was already issued and start from
the moment where IdP is being disabled and unscoped token is being
used.
Test plan:
1) Disable IdP
2) Try scoping unscoped token
"""
enabled_false = {'enabled': False}
self.federation_api.update_idp(self.IDP, enabled_false)
self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
expected_status=http_client.FORBIDDEN)
def test_scope_to_bad_project(self):
"""Scope unscoped token with a project we don't have access to."""
self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
expected_status=http_client.UNAUTHORIZED)
def test_scope_to_project_multiple_times(self):
"""Try to scope the unscoped token multiple times.
The new tokens should be scoped to:
* Customers' project
* Employees' project
"""
bodies = (self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN,
self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN)
project_ids = (self.proj_employees['id'],
self.proj_customers['id'])
for body, project_id_ref in zip(bodies, project_ids):
r = self.v3_create_token(body)
token_resp = r.result['token']
self._check_project_scoped_token_attributes(token_resp,
project_id_ref)
def test_scope_to_project_with_only_inherited_roles(self):
"""Try to scope token whose only roles are inherited."""
self.config_fixture.config(group='os_inherit', enabled=True)
r = self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_INHERITED_FROM_CUSTOMER)
token_resp = r.result['token']
self._check_project_scoped_token_attributes(
token_resp, self.project_inherited['id'])
roles_ref = [self.role_customer]
projects_ref = self.project_inherited
self._check_projects_and_roles(token_resp, roles_ref, projects_ref)
self.assertValidMappedUser(token_resp)
def test_scope_token_from_nonexistent_unscoped_token(self):
"""Try to scope token from non-existent unscoped token."""
self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN,
expected_status=http_client.NOT_FOUND)
def test_issue_token_from_rules_without_user(self):
api = auth_controllers.Auth()
context = {'environment': {}}
self._inject_assertion(context, 'BAD_TESTER_ASSERTION')
self.assertRaises(exception.Unauthorized,
api.authenticate_for_token,
context, self.UNSCOPED_V3_SAML2_REQ)
def test_issue_token_with_nonexistent_group(self):
"""Inject assertion that matches rule issuing bad group id.
Expect server to find out that some groups are missing in the
backend and raise exception.MappedGroupNotFound exception.
"""
self.assertRaises(exception.MappedGroupNotFound,
self._issue_unscoped_token,
assertion='CONTRACTOR_ASSERTION')
def test_scope_to_domain_once(self):
r = self.v3_create_token(self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER)
token_resp = r.result['token']
self._check_domain_scoped_token_attributes(token_resp,
self.domainA['id'])
def test_scope_to_domain_multiple_tokens(self):
"""Issue multiple tokens scoping to different domains.
The new tokens should be scoped to:
* domainA
* domainB
* domainC
"""
bodies = (self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN,
self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN,
self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN)
domain_ids = (self.domainA['id'],
self.domainB['id'],
self.domainC['id'])
for body, domain_id_ref in zip(bodies, domain_ids):
r = self.v3_create_token(body)
token_resp = r.result['token']
self._check_domain_scoped_token_attributes(token_resp,
domain_id_ref)
def test_scope_to_domain_with_only_inherited_roles_fails(self):
"""Try to scope to a domain that has no direct roles."""
self.v3_create_token(
self.TOKEN_SCOPE_DOMAIN_D_FROM_CUSTOMER,
expected_status=http_client.UNAUTHORIZED)
def test_list_projects(self):
urls = ('/OS-FEDERATION/projects', '/auth/projects')
token = (self.tokens['CUSTOMER_ASSERTION'],
self.tokens['EMPLOYEE_ASSERTION'],
self.tokens['ADMIN_ASSERTION'])
self.config_fixture.config(group='os_inherit', enabled=True)
projects_refs = (set([self.proj_customers['id'],
self.project_inherited['id']]),
set([self.proj_employees['id'],
self.project_all['id']]),
set([self.proj_employees['id'],
self.project_all['id'],
self.proj_customers['id'],
self.project_inherited['id']]))
for token, projects_ref in zip(token, projects_refs):
for url in urls:
r = self.get(url, token=token)
projects_resp = r.result['projects']
projects = set(p['id'] for p in projects_resp)
self.assertEqual(projects_ref, projects,
'match failed for url %s' % url)
# TODO(samueldmq): Create another test class for role inheritance tests.
# The advantage would be to reduce the complexity of this test class and
# have tests specific to this functionality grouped, easing readability and
# maintainability.
def test_list_projects_for_inherited_project_assignment(self):
# Enable os_inherit extension
self.config_fixture.config(group='os_inherit', enabled=True)
# Create a subproject
subproject_inherited = unit.new_project_ref(
domain_id=self.domainD['id'],
parent_id=self.project_inherited['id'])
self.resource_api.create_project(subproject_inherited['id'],
subproject_inherited)
# Create an inherited role assignment
self.assignment_api.create_grant(
role_id=self.role_employee['id'],
group_id=self.group_employees['id'],
project_id=self.project_inherited['id'],
inherited_to_projects=True)
# Define expected projects from employee assertion, which contain
# the created subproject
expected_project_ids = [self.project_all['id'],
self.proj_employees['id'],
subproject_inherited['id']]
# Assert expected projects for both available URLs
for url in ('/OS-FEDERATION/projects', '/auth/projects'):
r = self.get(url, token=self.tokens['EMPLOYEE_ASSERTION'])
project_ids = [project['id'] for project in r.result['projects']]
self.assertEqual(len(expected_project_ids), len(project_ids))
for expected_project_id in expected_project_ids:
self.assertIn(expected_project_id, project_ids,
'Projects match failed for url %s' % url)
def test_list_domains(self):
urls = ('/OS-FEDERATION/domains', '/auth/domains')
tokens = (self.tokens['CUSTOMER_ASSERTION'],
self.tokens['EMPLOYEE_ASSERTION'],
self.tokens['ADMIN_ASSERTION'])
# NOTE(henry-nash): domain D does not appear in the expected results
# since it only had inherited roles (which only apply to projects
# within the domain)
domain_refs = (set([self.domainA['id']]),
set([self.domainA['id'],
self.domainB['id']]),
set([self.domainA['id'],
self.domainB['id'],
self.domainC['id']]))
for token, domains_ref in zip(tokens, domain_refs):
for url in urls:
r = self.get(url, token=token)
domains_resp = r.result['domains']
domains = set(p['id'] for p in domains_resp)
self.assertEqual(domains_ref, domains,
'match failed for url %s' % url)
@utils.wip('This will fail because of bug #1501032. The returned method '
'list should contain "saml2". This is documented in bug '
'1501032.')
def test_full_workflow(self):
"""Test 'standard' workflow for granting access tokens.
* Issue unscoped token
* List available projects based on groups
* Scope token to one of available projects
"""
r = self._issue_unscoped_token()
token_resp = r.json_body['token']
# NOTE(lbragstad): Ensure only 'saml2' is in the method list.
self.assertListEqual(['saml2'], token_resp['methods'])
self.assertValidMappedUser(token_resp)
employee_unscoped_token_id = r.headers.get('X-Subject-Token')
r = self.get('/auth/projects', token=employee_unscoped_token_id)
projects = r.result['projects']
random_project = random.randint(0, len(projects)) - 1
project = projects[random_project]
v3_scope_request = self._scope_request(employee_unscoped_token_id,
'project', project['id'])
r = self.v3_create_token(v3_scope_request)
token_resp = r.result['token']
# FIXME(lbragstad): 'token' should be in the list of methods returned
# but it isn't. This is documented in bug 1501032.
self.assertIn('token', token_resp['methods'])
self.assertIn('saml2', token_resp['methods'])
self._check_project_scoped_token_attributes(token_resp, project['id'])
def test_workflow_with_groups_deletion(self):
"""Test full workflow with groups deletion before token scoping.
The test scenario is as follows:
- Create group ``group``
- Create and assign roles to ``group`` and ``project_all``
- Patch mapping rules for existing IdP so it issues group id
- Issue unscoped token with ``group``'s id
- Delete group ``group``
- Scope token to ``project_all``
- Expect HTTP 500 response
"""
# create group and role
group = unit.new_group_ref(domain_id=self.domainA['id'])
group = self.identity_api.create_group(group)
role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
# assign role to group and project_admins
self.assignment_api.create_grant(role['id'],
group_id=group['id'],
project_id=self.project_all['id'])
rules = {
'rules': [
{
'local': [
{
'group': {
'id': group['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'LastName',
'any_one_of': [
'Account'
]
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='TESTER_ASSERTION')
token_id = r.headers.get('X-Subject-Token')
# delete group
self.identity_api.delete_group(group['id'])
# scope token to project_all, expect HTTP 500
scoped_token = self._scope_request(
token_id, 'project',
self.project_all['id'])
self.v3_create_token(
scoped_token, expected_status=http_client.INTERNAL_SERVER_ERROR)
def test_lists_with_missing_group_in_backend(self):
"""Test a mapping that points to a group that does not exist
For explicit mappings, we expect the group to exist in the backend,
but for lists, specifically blacklists, a missing group is expected
as many groups will be specified by the IdP that are not Keystone
groups.
The test scenario is as follows:
- Create group ``EXISTS``
- Set mapping rules for existing IdP with a blacklist
that passes through as REMOTE_USER_GROUPS
        - Issue unscoped token with group ``EXISTS`` id in it
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
group = unit.new_group_ref(domain_id=domain_id, name='EXISTS')
group = self.identity_api.create_group(group)
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
def test_empty_blacklist_passess_all_values(self):
"""Test a mapping with empty blacklist specified
Not adding a ``blacklist`` keyword to the mapping rules has the same
effect as adding an empty ``blacklist``.
In both cases, the mapping engine will not discard any groups that are
        associated with Apache environment variables.
        This test checks a scenario where an empty blacklist was specified.
Expected result is to allow any value.
The test scenario is as follows:
- Create group ``EXISTS``
- Create group ``NO_EXISTS``
        - Set mapping rules for existing IdP with an empty blacklist
that passes through as REMOTE_USER_GROUPS
- Issue unscoped token with groups ``EXISTS`` and ``NO_EXISTS``
assigned
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
# Add a group "EXISTS"
group_exists = unit.new_group_ref(domain_id=domain_id, name='EXISTS')
group_exists = self.identity_api.create_group(group_exists)
# Add a group "NO_EXISTS"
group_no_exists = unit.new_group_ref(domain_id=domain_id,
name='NO_EXISTS')
group_no_exists = self.identity_api.create_group(group_no_exists)
group_ids = set([group_exists['id'], group_no_exists['id']])
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
"blacklist": []
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
self.assertEqual(len(group_ids), len(assigned_group_ids))
for group in assigned_group_ids:
self.assertIn(group['id'], group_ids)
def test_not_adding_blacklist_passess_all_values(self):
"""Test a mapping without blacklist specified.
Not adding a ``blacklist`` keyword to the mapping rules has the same
effect as adding an empty ``blacklist``. In both cases all values will
be accepted and passed.
        This test checks a scenario where a blacklist was not specified.
Expected result is to allow any value.
The test scenario is as follows:
- Create group ``EXISTS``
- Create group ``NO_EXISTS``
        - Set mapping rules for existing IdP with no blacklist on the
          REMOTE_USER_GROUPS rule
        - Issue unscoped token with groups ``EXISTS`` and ``NO_EXISTS``
assigned
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
# Add a group "EXISTS"
group_exists = unit.new_group_ref(domain_id=domain_id,
name='EXISTS')
group_exists = self.identity_api.create_group(group_exists)
# Add a group "NO_EXISTS"
group_no_exists = unit.new_group_ref(domain_id=domain_id,
name='NO_EXISTS')
group_no_exists = self.identity_api.create_group(group_no_exists)
group_ids = set([group_exists['id'], group_no_exists['id']])
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
self.assertEqual(len(group_ids), len(assigned_group_ids))
for group in assigned_group_ids:
self.assertIn(group['id'], group_ids)
def test_empty_whitelist_discards_all_values(self):
"""Test that empty whitelist blocks all the values
Not adding a ``whitelist`` keyword to the mapping value is different
        than adding an empty whitelist. The former case will simply pass all the
values, whereas the latter would discard all the values.
This test checks scenario where an empty whitelist was specified.
The expected result is that no groups are matched.
The test scenario is as follows:
- Create group ``EXISTS``
- Set mapping rules for existing IdP with an empty whitelist
          that would discard any values from the assertion
- Try issuing unscoped token, expect server to raise
``exception.MissingGroups`` as no groups were matched and ephemeral
user does not have any group assigned.
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
group = unit.new_group_ref(domain_id=domain_id, name='EXISTS')
group = self.identity_api.create_group(group)
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
"whitelist": []
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
self.assertRaises(exception.MissingGroups,
self._issue_unscoped_token,
assertion='UNMATCHED_GROUP_ASSERTION')
def test_not_setting_whitelist_accepts_all_values(self):
"""Test that not setting whitelist passes
Not adding a ``whitelist`` keyword to the mapping value is different
        than adding an empty whitelist. The former case will simply pass all the
values, whereas the latter would discard all the values.
This test checks a scenario where a ``whitelist`` was not specified.
Expected result is that no groups are ignored.
The test scenario is as follows:
- Create group ``EXISTS``
        - Set mapping rules for existing IdP with no whitelist on the
          REMOTE_USER_GROUPS rule
- Issue an unscoped token and make sure ephemeral user is a member of
two groups.
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
# Add a group "EXISTS"
group_exists = unit.new_group_ref(domain_id=domain_id,
name='EXISTS')
group_exists = self.identity_api.create_group(group_exists)
# Add a group "NO_EXISTS"
group_no_exists = unit.new_group_ref(domain_id=domain_id,
name='NO_EXISTS')
group_no_exists = self.identity_api.create_group(group_no_exists)
group_ids = set([group_exists['id'], group_no_exists['id']])
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
self.assertEqual(len(group_ids), len(assigned_group_ids))
for group in assigned_group_ids:
self.assertIn(group['id'], group_ids)
def test_assertion_prefix_parameter(self):
"""Test parameters filtering based on the prefix.
        With ``assertion_prefix`` set to a fixed, non-default value,
issue an unscoped token from assertion EMPLOYEE_ASSERTION_PREFIXED.
Expect server to return unscoped token.
"""
self.config_fixture.config(group='federation',
assertion_prefix=self.ASSERTION_PREFIX)
r = self._issue_unscoped_token(assertion='EMPLOYEE_ASSERTION_PREFIXED')
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_assertion_prefix_parameter_expect_fail(self):
"""Test parameters filtering based on the prefix.
        With ``assertion_prefix`` left at its default value (an empty string),
issue an unscoped token from assertion EMPLOYEE_ASSERTION.
Next, configure ``assertion_prefix`` to value ``UserName``.
Try issuing unscoped token with EMPLOYEE_ASSERTION.
        Expect the server to raise exception.Unauthorized.
"""
r = self._issue_unscoped_token()
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
self.config_fixture.config(group='federation',
assertion_prefix='UserName')
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token)
def test_v2_auth_with_federation_token_fails(self):
"""Test that using a federation token with v2 auth fails.
If an admin sets up a federated Keystone environment, and a user
incorrectly configures a service (like Nova) to only use v2 auth, the
returned message should be informative.
"""
r = self._issue_unscoped_token()
token_id = r.headers.get('X-Subject-Token')
self.assertRaises(exception.Unauthorized,
self.token_provider_api.validate_v2_token,
token_id=token_id)
def test_unscoped_token_has_user_domain(self):
r = self._issue_unscoped_token()
self._check_domains_are_valid(r.json_body['token'])
def test_scoped_token_has_user_domain(self):
r = self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
self._check_domains_are_valid(r.result['token'])
def test_issue_unscoped_token_for_local_user(self):
r = self._issue_unscoped_token(assertion='LOCAL_USER_ASSERTION')
token_resp = r.json_body['token']
self.assertListEqual(['saml2'], token_resp['methods'])
self.assertEqual(self.user['id'], token_resp['user']['id'])
self.assertEqual(self.user['name'], token_resp['user']['name'])
self.assertEqual(self.domain['id'], token_resp['user']['domain']['id'])
# Make sure the token is not scoped
self.assertNotIn('project', token_resp)
self.assertNotIn('domain', token_resp)
def test_issue_token_for_local_user_user_not_found(self):
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token,
assertion='ANOTHER_LOCAL_USER_ASSERTION')
class FernetFederatedTokenTests(test_v3.RestfulTestCase, FederatedSetupMixin):
AUTH_METHOD = 'token'
def load_fixtures(self, fixtures):
super(FernetFederatedTokenTests, self).load_fixtures(fixtures)
self.load_federation_sample_data()
def config_overrides(self):
super(FernetFederatedTokenTests, self).config_overrides()
self.config_fixture.config(group='token', provider='fernet')
self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
def auth_plugin_config_override(self):
methods = ['saml2', 'token', 'password']
super(FernetFederatedTokenTests,
self).auth_plugin_config_override(methods)
def test_federated_unscoped_token(self):
resp = self._issue_unscoped_token()
self.assertEqual(204, len(resp.headers['X-Subject-Token']))
self.assertValidMappedUser(resp.json_body['token'])
def test_federated_unscoped_token_with_multiple_groups(self):
assertion = 'ANOTHER_CUSTOMER_ASSERTION'
resp = self._issue_unscoped_token(assertion=assertion)
self.assertEqual(226, len(resp.headers['X-Subject-Token']))
self.assertValidMappedUser(resp.json_body['token'])
def test_validate_federated_unscoped_token(self):
resp = self._issue_unscoped_token()
unscoped_token = resp.headers.get('X-Subject-Token')
# assert that the token we received is valid
self.get('/auth/tokens/', headers={'X-Subject-Token': unscoped_token})
def test_fernet_full_workflow(self):
"""Test 'standard' workflow for granting Fernet access tokens.
* Issue unscoped token
* List available projects based on groups
* Scope token to one of available projects
"""
resp = self._issue_unscoped_token()
self.assertValidMappedUser(resp.json_body['token'])
unscoped_token = resp.headers.get('X-Subject-Token')
resp = self.get('/auth/projects', token=unscoped_token)
projects = resp.result['projects']
random_project = random.randint(0, len(projects)) - 1
project = projects[random_project]
v3_scope_request = self._scope_request(unscoped_token,
'project', project['id'])
resp = self.v3_create_token(v3_scope_request)
token_resp = resp.result['token']
self._check_project_scoped_token_attributes(token_resp, project['id'])
class FederatedTokenTestsMethodToken(FederatedTokenTests):
"""Test federation operation with unified scoping auth method.
Test all the operations with auth method set to ``token`` as a new, unified
way for scoping all the tokens.
"""
AUTH_METHOD = 'token'
def auth_plugin_config_override(self):
methods = ['saml2', 'token']
super(FederatedTokenTests,
self).auth_plugin_config_override(methods)
    @utils.wip('This will fail because of bug #1501032. The returned method '
'list should contain "saml2". This is documented in bug '
'1501032.')
def test_full_workflow(self):
"""Test 'standard' workflow for granting access tokens.
* Issue unscoped token
* List available projects based on groups
* Scope token to one of available projects
"""
r = self._issue_unscoped_token()
token_resp = r.json_body['token']
# NOTE(lbragstad): Ensure only 'saml2' is in the method list.
self.assertListEqual(['saml2'], token_resp['methods'])
self.assertValidMappedUser(token_resp)
employee_unscoped_token_id = r.headers.get('X-Subject-Token')
r = self.get('/auth/projects', token=employee_unscoped_token_id)
projects = r.result['projects']
random_project = random.randint(0, len(projects)) - 1
project = projects[random_project]
v3_scope_request = self._scope_request(employee_unscoped_token_id,
'project', project['id'])
r = self.v3_authenticate_token(v3_scope_request)
token_resp = r.result['token']
self.assertIn('token', token_resp['methods'])
self.assertIn('saml2', token_resp['methods'])
self._check_project_scoped_token_attributes(token_resp, project['id'])
class FederatedUserTests(test_v3.RestfulTestCase, FederatedSetupMixin):
"""Tests for federated users
Tests new shadow users functionality
"""
def auth_plugin_config_override(self):
methods = ['saml2']
super(FederatedUserTests, self).auth_plugin_config_override(methods)
def setUp(self):
super(FederatedUserTests, self).setUp()
def load_fixtures(self, fixtures):
super(FederatedUserTests, self).load_fixtures(fixtures)
self.load_federation_sample_data()
def test_user_id_persistense(self):
"""Ensure user_id is persistend for multiple federated authn calls."""
r = self._issue_unscoped_token()
user_id = r.json_body['token']['user']['id']
r = self._issue_unscoped_token()
user_id2 = r.json_body['token']['user']['id']
self.assertEqual(user_id, user_id2)
class JsonHomeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin):
JSON_HOME_DATA = {
'http://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/'
'1.0/rel/identity_provider': {
'href-template': '/OS-FEDERATION/identity_providers/{idp_id}',
'href-vars': {
'idp_id': 'http://docs.openstack.org/api/openstack-identity/3/'
'ext/OS-FEDERATION/1.0/param/idp_id'
},
},
}
def _is_xmlsec1_installed():
p = subprocess.Popen(
['which', 'xmlsec1'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# invert the return code
return not bool(p.wait())
def _load_xml(filename):
with open(os.path.join(XMLDIR, filename), 'r') as xml:
return xml.read()
class SAMLGenerationTests(test_v3.RestfulTestCase):
SP_AUTH_URL = ('http://beta.com:5000/v3/OS-FEDERATION/identity_providers'
'/BETA/protocols/saml2/auth')
ASSERTION_FILE = 'signed_saml2_assertion.xml'
# The values of the following variables match the attributes values found
# in ASSERTION_FILE
ISSUER = 'https://acme.com/FIM/sps/openstack/saml20'
RECIPIENT = 'http://beta.com/Shibboleth.sso/SAML2/POST'
SUBJECT = 'test_user'
SUBJECT_DOMAIN = 'user_domain'
ROLES = ['admin', 'member']
PROJECT = 'development'
PROJECT_DOMAIN = 'project_domain'
SAML_GENERATION_ROUTE = '/auth/OS-FEDERATION/saml2'
ECP_GENERATION_ROUTE = '/auth/OS-FEDERATION/saml2/ecp'
ASSERTION_VERSION = "2.0"
SERVICE_PROVDIER_ID = 'ACME'
def sp_ref(self):
ref = {
'auth_url': self.SP_AUTH_URL,
'enabled': True,
'description': uuid.uuid4().hex,
'sp_url': self.RECIPIENT,
'relay_state_prefix': CONF.saml.relay_state_prefix,
}
return ref
def setUp(self):
super(SAMLGenerationTests, self).setUp()
self.signed_assertion = saml2.create_class_from_xml_string(
saml.Assertion, _load_xml(self.ASSERTION_FILE))
self.sp = self.sp_ref()
url = '/OS-FEDERATION/service_providers/' + self.SERVICE_PROVDIER_ID
self.put(url, body={'service_provider': self.sp},
expected_status=http_client.CREATED)
def test_samlize_token_values(self):
"""Test the SAML generator produces a SAML object.
        Test the SAML generator directly by passing known arguments; the result
should be a SAML object that consistently includes attributes based on
the known arguments that were passed in.
"""
with mock.patch.object(keystone_idp, '_sign_assertion',
return_value=self.signed_assertion):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
self.SUBJECT,
self.SUBJECT_DOMAIN,
self.ROLES, self.PROJECT,
self.PROJECT_DOMAIN)
assertion = response.assertion
self.assertIsNotNone(assertion)
self.assertIsInstance(assertion, saml.Assertion)
issuer = response.issuer
self.assertEqual(self.RECIPIENT, response.destination)
self.assertEqual(self.ISSUER, issuer.text)
user_attribute = assertion.attribute_statement[0].attribute[0]
self.assertEqual(self.SUBJECT, user_attribute.attribute_value[0].text)
user_domain_attribute = (
assertion.attribute_statement[0].attribute[1])
self.assertEqual(self.SUBJECT_DOMAIN,
user_domain_attribute.attribute_value[0].text)
role_attribute = assertion.attribute_statement[0].attribute[2]
for attribute_value in role_attribute.attribute_value:
self.assertIn(attribute_value.text, self.ROLES)
project_attribute = assertion.attribute_statement[0].attribute[3]
self.assertEqual(self.PROJECT,
project_attribute.attribute_value[0].text)
project_domain_attribute = (
assertion.attribute_statement[0].attribute[4])
self.assertEqual(self.PROJECT_DOMAIN,
project_domain_attribute.attribute_value[0].text)
def test_verify_assertion_object(self):
"""Test that the Assertion object is built properly.
The Assertion doesn't need to be signed in this test, so
_sign_assertion method is patched and doesn't alter the assertion.
"""
with mock.patch.object(keystone_idp, '_sign_assertion',
side_effect=lambda x: x):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
self.SUBJECT,
self.SUBJECT_DOMAIN,
self.ROLES, self.PROJECT,
self.PROJECT_DOMAIN)
assertion = response.assertion
self.assertEqual(self.ASSERTION_VERSION, assertion.version)
def test_valid_saml_xml(self):
"""Test the generated SAML object can become valid XML.
        Test the generator directly by passing known arguments; the result
should be a SAML object that consistently includes attributes based on
the known arguments that were passed in.
"""
with mock.patch.object(keystone_idp, '_sign_assertion',
return_value=self.signed_assertion):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
self.SUBJECT,
self.SUBJECT_DOMAIN,
self.ROLES, self.PROJECT,
self.PROJECT_DOMAIN)
saml_str = response.to_string()
response = etree.fromstring(saml_str)
issuer = response[0]
assertion = response[2]
self.assertEqual(self.RECIPIENT, response.get('Destination'))
self.assertEqual(self.ISSUER, issuer.text)
user_attribute = assertion[4][0]
self.assertEqual(self.SUBJECT, user_attribute[0].text)
user_domain_attribute = assertion[4][1]
self.assertEqual(self.SUBJECT_DOMAIN, user_domain_attribute[0].text)
role_attribute = assertion[4][2]
for attribute_value in role_attribute:
self.assertIn(attribute_value.text, self.ROLES)
project_attribute = assertion[4][3]
self.assertEqual(self.PROJECT, project_attribute[0].text)
project_domain_attribute = assertion[4][4]
self.assertEqual(self.PROJECT_DOMAIN, project_domain_attribute[0].text)
def test_assertion_using_explicit_namespace_prefixes(self):
def mocked_subprocess_check_output(*popenargs, **kwargs):
# the last option is the assertion file to be signed
filename = popenargs[0][-1]
with open(filename, 'r') as f:
assertion_content = f.read()
# since we are not testing the signature itself, we can return
# the assertion as is without signing it
return assertion_content
with mock.patch.object(subprocess, 'check_output',
side_effect=mocked_subprocess_check_output):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
self.SUBJECT,
self.SUBJECT_DOMAIN,
self.ROLES, self.PROJECT,
self.PROJECT_DOMAIN)
assertion_xml = response.assertion.to_string()
# make sure we have the proper tag and prefix for the assertion
# namespace
self.assertIn('<saml:Assertion', assertion_xml)
self.assertIn('xmlns:saml="' + saml2.NAMESPACE + '"',
assertion_xml)
self.assertIn('xmlns:xmldsig="' + xmldsig.NAMESPACE + '"',
assertion_xml)
def test_saml_signing(self):
"""Test that the SAML generator produces a SAML object.
        Test the SAML generator directly by passing known arguments; the result
should be a SAML object that consistently includes attributes based on
the known arguments that were passed in.
"""
if not _is_xmlsec1_installed():
self.skipTest('xmlsec1 is not installed')
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
self.SUBJECT, self.SUBJECT_DOMAIN,
self.ROLES, self.PROJECT,
self.PROJECT_DOMAIN)
signature = response.assertion.signature
self.assertIsNotNone(signature)
self.assertIsInstance(signature, xmldsig.Signature)
idp_public_key = sigver.read_cert_from_file(CONF.saml.certfile, 'pem')
cert_text = signature.key_info.x509_data[0].x509_certificate.text
# NOTE(stevemar): Rather than one line of text, the certificate is
# printed with newlines for readability, we remove these so we can
# match it with the key that we used.
cert_text = cert_text.replace(os.linesep, '')
self.assertEqual(idp_public_key, cert_text)
def _create_generate_saml_request(self, token_id, sp_id):
return {
"auth": {
"identity": {
"methods": [
"token"
],
"token": {
"id": token_id
}
},
"scope": {
"service_provider": {
"id": sp_id
}
}
}
}
def _fetch_valid_token(self):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
resp = self.v3_create_token(auth_data)
token_id = resp.headers.get('X-Subject-Token')
return token_id
def _fetch_domain_scoped_token(self):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
user_domain_id=self.domain['id'])
resp = self.v3_create_token(auth_data)
token_id = resp.headers.get('X-Subject-Token')
return token_id
def test_not_project_scoped_token(self):
"""Ensure SAML generation fails when passing domain-scoped tokens.
The server should return a 403 Forbidden Action.
"""
self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
token_id = self._fetch_domain_scoped_token()
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
with mock.patch.object(keystone_idp, '_sign_assertion',
return_value=self.signed_assertion):
self.post(self.SAML_GENERATION_ROUTE, body=body,
expected_status=http_client.FORBIDDEN)
def test_generate_saml_route(self):
"""Test that the SAML generation endpoint produces XML.
The SAML endpoint /v3/auth/OS-FEDERATION/saml2 should take as input,
a scoped token ID, and a Service Provider ID.
The controller should fetch details about the user from the token,
and details about the service provider from its ID.
This should be enough information to invoke the SAML generator and
provide a valid SAML (XML) document back.
"""
self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
token_id = self._fetch_valid_token()
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
with mock.patch.object(keystone_idp, '_sign_assertion',
return_value=self.signed_assertion):
http_response = self.post(self.SAML_GENERATION_ROUTE, body=body,
response_content_type='text/xml',
expected_status=http_client.OK)
response = etree.fromstring(http_response.result)
issuer = response[0]
assertion = response[2]
self.assertEqual(self.RECIPIENT, response.get('Destination'))
self.assertEqual(self.ISSUER, issuer.text)
# NOTE(stevemar): We should test this against expected values,
# but the self.xyz attribute names are uuids, and we mock out
# the result. Ideally we should update the mocked result with
# some known data, and create the roles/project/user before
# these tests run.
user_attribute = assertion[4][0]
self.assertIsInstance(user_attribute[0].text, str)
user_domain_attribute = assertion[4][1]
self.assertIsInstance(user_domain_attribute[0].text, str)
role_attribute = assertion[4][2]
self.assertIsInstance(role_attribute[0].text, str)
project_attribute = assertion[4][3]
self.assertIsInstance(project_attribute[0].text, str)
project_domain_attribute = assertion[4][4]
self.assertIsInstance(project_domain_attribute[0].text, str)
def test_invalid_scope_body(self):
"""Test that missing the scope in request body raises an exception.
Raises exception.SchemaValidationError() - error 400 Bad Request
"""
token_id = uuid.uuid4().hex
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
del body['auth']['scope']
self.post(self.SAML_GENERATION_ROUTE, body=body,
expected_status=http_client.BAD_REQUEST)
def test_invalid_token_body(self):
"""Test that missing the token in request body raises an exception.
Raises exception.SchemaValidationError() - error 400 Bad Request
"""
token_id = uuid.uuid4().hex
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
del body['auth']['identity']['token']
self.post(self.SAML_GENERATION_ROUTE, body=body,
expected_status=http_client.BAD_REQUEST)
def test_sp_not_found(self):
"""Test SAML generation with an invalid service provider ID.
Raises exception.ServiceProviderNotFound() - error Not Found 404
"""
sp_id = uuid.uuid4().hex
token_id = self._fetch_valid_token()
body = self._create_generate_saml_request(token_id, sp_id)
self.post(self.SAML_GENERATION_ROUTE, body=body,
expected_status=http_client.NOT_FOUND)
def test_sp_disabled(self):
"""Try generating assertion for disabled Service Provider."""
# Disable Service Provider
sp_ref = {'enabled': False}
self.federation_api.update_sp(self.SERVICE_PROVDIER_ID, sp_ref)
token_id = self._fetch_valid_token()
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
self.post(self.SAML_GENERATION_ROUTE, body=body,
expected_status=http_client.FORBIDDEN)
def test_token_not_found(self):
"""Test that an invalid token in the request body raises an exception.
Raises exception.TokenNotFound() - error Not Found 404
"""
token_id = uuid.uuid4().hex
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
self.post(self.SAML_GENERATION_ROUTE, body=body,
expected_status=http_client.NOT_FOUND)
def test_generate_ecp_route(self):
"""Test that the ECP generation endpoint produces XML.
The ECP endpoint /v3/auth/OS-FEDERATION/saml2/ecp should take the same
input as the SAML generation endpoint (scoped token ID + Service
Provider ID).
The controller should return a SAML assertion that is wrapped in a
SOAP envelope.
"""
self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
token_id = self._fetch_valid_token()
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
with mock.patch.object(keystone_idp, '_sign_assertion',
return_value=self.signed_assertion):
http_response = self.post(self.ECP_GENERATION_ROUTE, body=body,
response_content_type='text/xml',
expected_status=http_client.OK)
env_response = etree.fromstring(http_response.result)
header = env_response[0]
# Verify the relay state starts with 'ss:mem'
prefix = CONF.saml.relay_state_prefix
self.assertThat(header[0].text, matchers.StartsWith(prefix))
# Verify that the content in the body matches the expected assertion
body = env_response[1]
response = body[0]
issuer = response[0]
assertion = response[2]
self.assertEqual(self.RECIPIENT, response.get('Destination'))
self.assertEqual(self.ISSUER, issuer.text)
user_attribute = assertion[4][0]
self.assertIsInstance(user_attribute[0].text, str)
user_domain_attribute = assertion[4][1]
self.assertIsInstance(user_domain_attribute[0].text, str)
role_attribute = assertion[4][2]
self.assertIsInstance(role_attribute[0].text, str)
project_attribute = assertion[4][3]
self.assertIsInstance(project_attribute[0].text, str)
project_domain_attribute = assertion[4][4]
self.assertIsInstance(project_domain_attribute[0].text, str)
@mock.patch('saml2.create_class_from_xml_string')
@mock.patch('oslo_utils.fileutils.write_to_tempfile')
@mock.patch.object(subprocess, 'check_output')
def test__sign_assertion(self, check_output_mock,
write_to_tempfile_mock, create_class_mock):
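        # _sign_assertion writes the assertion to a temp file, runs xmlsec1 on
        # it, and rebuilds a saml.Assertion from the command output; all three
        # steps are mocked here so only the wiring is exercised.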
write_to_tempfile_mock.return_value = 'tmp_path'
check_output_mock.return_value = 'fakeoutput'
keystone_idp._sign_assertion(self.signed_assertion)
create_class_mock.assert_called_with(saml.Assertion, 'fakeoutput')
@mock.patch('oslo_utils.fileutils.write_to_tempfile')
@mock.patch.object(subprocess, 'check_output')
def test__sign_assertion_exc(self, check_output_mock,
write_to_tempfile_mock):
# If the command fails the command output is logged.
write_to_tempfile_mock.return_value = 'tmp_path'
sample_returncode = 1
sample_output = self.getUniqueString()
check_output_mock.side_effect = subprocess.CalledProcessError(
returncode=sample_returncode, cmd=CONF.saml.xmlsec1_binary,
output=sample_output)
logger_fixture = self.useFixture(fixtures.LoggerFixture())
self.assertRaises(exception.SAMLSigningError,
keystone_idp._sign_assertion,
self.signed_assertion)
expected_log = (
"Error when signing assertion, reason: Command '%s' returned "
"non-zero exit status %s %s\n" %
(CONF.saml.xmlsec1_binary, sample_returncode, sample_output))
self.assertEqual(expected_log, logger_fixture.output)
@mock.patch('oslo_utils.fileutils.write_to_tempfile')
def test__sign_assertion_fileutils_exc(self, write_to_tempfile_mock):
exception_msg = 'fake'
write_to_tempfile_mock.side_effect = Exception(exception_msg)
logger_fixture = self.useFixture(fixtures.LoggerFixture())
self.assertRaises(exception.SAMLSigningError,
keystone_idp._sign_assertion,
self.signed_assertion)
expected_log = (
'Error when signing assertion, reason: %s\n' % exception_msg)
self.assertEqual(expected_log, logger_fixture.output)
class IdPMetadataGenerationTests(test_v3.RestfulTestCase):
"""A class for testing Identity Provider Metadata generation."""
METADATA_URL = '/OS-FEDERATION/saml2/metadata'
def setUp(self):
super(IdPMetadataGenerationTests, self).setUp()
self.generator = keystone_idp.MetadataGenerator()
def config_overrides(self):
super(IdPMetadataGenerationTests, self).config_overrides()
self.config_fixture.config(
group='saml',
idp_entity_id=federation_fixtures.IDP_ENTITY_ID,
idp_sso_endpoint=federation_fixtures.IDP_SSO_ENDPOINT,
idp_organization_name=federation_fixtures.IDP_ORGANIZATION_NAME,
idp_organization_display_name=(
federation_fixtures.IDP_ORGANIZATION_DISPLAY_NAME),
idp_organization_url=federation_fixtures.IDP_ORGANIZATION_URL,
idp_contact_company=federation_fixtures.IDP_CONTACT_COMPANY,
idp_contact_name=federation_fixtures.IDP_CONTACT_GIVEN_NAME,
idp_contact_surname=federation_fixtures.IDP_CONTACT_SURNAME,
idp_contact_email=federation_fixtures.IDP_CONTACT_EMAIL,
idp_contact_telephone=(
federation_fixtures.IDP_CONTACT_TELEPHONE_NUMBER),
idp_contact_type=federation_fixtures.IDP_CONTACT_TYPE)
def test_check_entity_id(self):
metadata = self.generator.generate_metadata()
self.assertEqual(federation_fixtures.IDP_ENTITY_ID, metadata.entity_id)
def test_metadata_validity(self):
"""Call md.EntityDescriptor method that does internal verification."""
self.generator.generate_metadata().verify()
def test_serialize_metadata_object(self):
"""Check whether serialization doesn't raise any exceptions."""
self.generator.generate_metadata().to_string()
# TODO(marek-denis): Check values here
def test_check_idp_sso(self):
metadata = self.generator.generate_metadata()
idpsso_descriptor = metadata.idpsso_descriptor
self.assertIsNotNone(metadata.idpsso_descriptor)
self.assertEqual(federation_fixtures.IDP_SSO_ENDPOINT,
idpsso_descriptor.single_sign_on_service.location)
self.assertIsNotNone(idpsso_descriptor.organization)
organization = idpsso_descriptor.organization
self.assertEqual(federation_fixtures.IDP_ORGANIZATION_DISPLAY_NAME,
organization.organization_display_name.text)
self.assertEqual(federation_fixtures.IDP_ORGANIZATION_NAME,
organization.organization_name.text)
self.assertEqual(federation_fixtures.IDP_ORGANIZATION_URL,
organization.organization_url.text)
self.assertIsNotNone(idpsso_descriptor.contact_person)
contact_person = idpsso_descriptor.contact_person
self.assertEqual(federation_fixtures.IDP_CONTACT_GIVEN_NAME,
contact_person.given_name.text)
self.assertEqual(federation_fixtures.IDP_CONTACT_SURNAME,
contact_person.sur_name.text)
self.assertEqual(federation_fixtures.IDP_CONTACT_EMAIL,
contact_person.email_address.text)
self.assertEqual(federation_fixtures.IDP_CONTACT_TELEPHONE_NUMBER,
contact_person.telephone_number.text)
self.assertEqual(federation_fixtures.IDP_CONTACT_TYPE,
contact_person.contact_type)
def test_metadata_no_organization(self):
self.config_fixture.config(
group='saml',
idp_organization_display_name=None,
idp_organization_url=None,
idp_organization_name=None)
metadata = self.generator.generate_metadata()
idpsso_descriptor = metadata.idpsso_descriptor
self.assertIsNotNone(metadata.idpsso_descriptor)
self.assertIsNone(idpsso_descriptor.organization)
self.assertIsNotNone(idpsso_descriptor.contact_person)
def test_metadata_no_contact_person(self):
self.config_fixture.config(
group='saml',
idp_contact_name=None,
idp_contact_surname=None,
idp_contact_email=None,
idp_contact_telephone=None)
metadata = self.generator.generate_metadata()
idpsso_descriptor = metadata.idpsso_descriptor
self.assertIsNotNone(metadata.idpsso_descriptor)
self.assertIsNotNone(idpsso_descriptor.organization)
self.assertEqual([], idpsso_descriptor.contact_person)
def test_metadata_invalid_contact_type(self):
self.config_fixture.config(
group='saml',
idp_contact_type="invalid")
self.assertRaises(exception.ValidationError,
self.generator.generate_metadata)
def test_metadata_invalid_idp_sso_endpoint(self):
self.config_fixture.config(
group='saml',
idp_sso_endpoint=None)
self.assertRaises(exception.ValidationError,
self.generator.generate_metadata)
def test_metadata_invalid_idp_entity_id(self):
self.config_fixture.config(
group='saml',
idp_entity_id=None)
self.assertRaises(exception.ValidationError,
self.generator.generate_metadata)
def test_get_metadata_with_no_metadata_file_configured(self):
self.get(self.METADATA_URL,
expected_status=http_client.INTERNAL_SERVER_ERROR)
def test_get_metadata(self):
self.config_fixture.config(
group='saml', idp_metadata_path=XMLDIR + '/idp_saml2_metadata.xml')
r = self.get(self.METADATA_URL, response_content_type='text/xml')
self.assertEqual('text/xml', r.headers.get('Content-Type'))
reference_file = _load_xml('idp_saml2_metadata.xml')
self.assertEqual(reference_file, r.result)
class ServiceProviderTests(test_v3.RestfulTestCase):
"""A test class for Service Providers."""
MEMBER_NAME = 'service_provider'
COLLECTION_NAME = 'service_providers'
SERVICE_PROVIDER_ID = 'ACME'
SP_KEYS = ['auth_url', 'id', 'enabled', 'description',
'relay_state_prefix', 'sp_url']
def setUp(self):
super(ServiceProviderTests, self).setUp()
# Add a Service Provider
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
self.SP_REF = self.sp_ref()
self.SERVICE_PROVIDER = self.put(
url, body={'service_provider': self.SP_REF},
expected_status=http_client.CREATED).result
def sp_ref(self):
ref = {
'auth_url': 'https://' + uuid.uuid4().hex + '.com',
'enabled': True,
'description': uuid.uuid4().hex,
'sp_url': 'https://' + uuid.uuid4().hex + '.com',
'relay_state_prefix': CONF.saml.relay_state_prefix
}
return ref
def base_url(self, suffix=None):
if suffix is not None:
return '/OS-FEDERATION/service_providers/' + str(suffix)
return '/OS-FEDERATION/service_providers'
def _create_default_sp(self, body=None):
"""Create default Service Provider."""
url = self.base_url(suffix=uuid.uuid4().hex)
if body is None:
body = self.sp_ref()
resp = self.put(url, body={'service_provider': body},
expected_status=http_client.CREATED)
return resp
def test_get_service_provider(self):
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
resp = self.get(url)
self.assertValidEntity(resp.result['service_provider'],
keys_to_check=self.SP_KEYS)
def test_get_service_provider_fail(self):
url = self.base_url(suffix=uuid.uuid4().hex)
self.get(url, expected_status=http_client.NOT_FOUND)
def test_create_service_provider(self):
url = self.base_url(suffix=uuid.uuid4().hex)
sp = self.sp_ref()
resp = self.put(url, body={'service_provider': sp},
expected_status=http_client.CREATED)
self.assertValidEntity(resp.result['service_provider'],
keys_to_check=self.SP_KEYS)
def test_create_sp_relay_state_default(self):
"""Create an SP without relay state, should default to `ss:mem`."""
url = self.base_url(suffix=uuid.uuid4().hex)
sp = self.sp_ref()
del sp['relay_state_prefix']
resp = self.put(url, body={'service_provider': sp},
expected_status=http_client.CREATED)
sp_result = resp.result['service_provider']
self.assertEqual(CONF.saml.relay_state_prefix,
sp_result['relay_state_prefix'])
def test_create_sp_relay_state_non_default(self):
"""Create an SP with custom relay state."""
url = self.base_url(suffix=uuid.uuid4().hex)
sp = self.sp_ref()
non_default_prefix = uuid.uuid4().hex
sp['relay_state_prefix'] = non_default_prefix
resp = self.put(url, body={'service_provider': sp},
expected_status=http_client.CREATED)
sp_result = resp.result['service_provider']
self.assertEqual(non_default_prefix,
sp_result['relay_state_prefix'])
def test_create_service_provider_fail(self):
"""Try adding SP object with unallowed attribute."""
url = self.base_url(suffix=uuid.uuid4().hex)
sp = self.sp_ref()
sp[uuid.uuid4().hex] = uuid.uuid4().hex
self.put(url, body={'service_provider': sp},
expected_status=http_client.BAD_REQUEST)
def test_list_service_providers(self):
"""Test listing of service provider objects.
Add two new service providers. List all available service providers.
        Expect to get a list of three service providers (one created by
        setUp()). Test if attributes match.
"""
ref_service_providers = {
uuid.uuid4().hex: self.sp_ref(),
uuid.uuid4().hex: self.sp_ref(),
}
for id, sp in ref_service_providers.items():
url = self.base_url(suffix=id)
self.put(url, body={'service_provider': sp},
expected_status=http_client.CREATED)
# Insert ids into service provider object, we will compare it with
# responses from server and those include 'id' attribute.
ref_service_providers[self.SERVICE_PROVIDER_ID] = self.SP_REF
for id, sp in ref_service_providers.items():
sp['id'] = id
url = self.base_url()
resp = self.get(url)
service_providers = resp.result
for service_provider in service_providers['service_providers']:
id = service_provider['id']
self.assertValidEntity(
service_provider, ref=ref_service_providers[id],
keys_to_check=self.SP_KEYS)
def test_update_service_provider(self):
"""Update existing service provider.
Update default existing service provider and make sure it has been
properly changed.
"""
new_sp_ref = self.sp_ref()
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
resp = self.patch(url, body={'service_provider': new_sp_ref})
patch_result = resp.result
new_sp_ref['id'] = self.SERVICE_PROVIDER_ID
self.assertValidEntity(patch_result['service_provider'],
ref=new_sp_ref,
keys_to_check=self.SP_KEYS)
resp = self.get(url)
get_result = resp.result
self.assertDictEqual(patch_result['service_provider'],
get_result['service_provider'])
def test_update_service_provider_immutable_parameters(self):
"""Update immutable attributes in service provider.
        In this particular case the test will try to change the ``id``
        attribute. The server should return an HTTP 400 Bad Request error
        code.
"""
new_sp_ref = {'id': uuid.uuid4().hex}
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
self.patch(url, body={'service_provider': new_sp_ref},
expected_status=http_client.BAD_REQUEST)
def test_update_service_provider_unknown_parameter(self):
new_sp_ref = self.sp_ref()
new_sp_ref[uuid.uuid4().hex] = uuid.uuid4().hex
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
self.patch(url, body={'service_provider': new_sp_ref},
expected_status=http_client.BAD_REQUEST)
def test_update_service_provider_returns_not_found(self):
new_sp_ref = self.sp_ref()
new_sp_ref['description'] = uuid.uuid4().hex
url = self.base_url(suffix=uuid.uuid4().hex)
self.patch(url, body={'service_provider': new_sp_ref},
expected_status=http_client.NOT_FOUND)
def test_update_sp_relay_state(self):
"""Update an SP with custom relay state."""
new_sp_ref = self.sp_ref()
non_default_prefix = uuid.uuid4().hex
new_sp_ref['relay_state_prefix'] = non_default_prefix
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
resp = self.patch(url, body={'service_provider': new_sp_ref})
sp_result = resp.result['service_provider']
self.assertEqual(non_default_prefix,
sp_result['relay_state_prefix'])
def test_delete_service_provider(self):
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
self.delete(url)
def test_delete_service_provider_returns_not_found(self):
url = self.base_url(suffix=uuid.uuid4().hex)
self.delete(url, expected_status=http_client.NOT_FOUND)
def test_filter_list_sp_by_id(self):
def get_id(resp):
sp = resp.result.get('service_provider')
return sp.get('id')
sp1_id = get_id(self._create_default_sp())
sp2_id = get_id(self._create_default_sp())
        # list the SPs, should get both SPs.
url = self.base_url()
resp = self.get(url)
sps = resp.result.get('service_providers')
entities_ids = [e['id'] for e in sps]
self.assertIn(sp1_id, entities_ids)
self.assertIn(sp2_id, entities_ids)
# filter the SP by 'id'. Only SP1 should appear.
url = self.base_url() + '?id=' + sp1_id
resp = self.get(url)
sps = resp.result.get('service_providers')
entities_ids = [e['id'] for e in sps]
self.assertIn(sp1_id, entities_ids)
self.assertNotIn(sp2_id, entities_ids)
def test_filter_list_sp_by_enabled(self):
def get_id(resp):
sp = resp.result.get('service_provider')
return sp.get('id')
sp1_id = get_id(self._create_default_sp())
sp2_ref = self.sp_ref()
sp2_ref['enabled'] = False
sp2_id = get_id(self._create_default_sp(body=sp2_ref))
        # list the SPs, both new SPs should appear.
url = self.base_url()
resp = self.get(url)
sps = resp.result.get('service_providers')
entities_ids = [e['id'] for e in sps]
self.assertIn(sp1_id, entities_ids)
self.assertIn(sp2_id, entities_ids)
# filter the SP by 'enabled'. Only SP1 should appear.
url = self.base_url() + '?enabled=True'
resp = self.get(url)
sps = resp.result.get('service_providers')
entities_ids = [e['id'] for e in sps]
self.assertIn(sp1_id, entities_ids)
self.assertNotIn(sp2_id, entities_ids)
class WebSSOTests(FederatedTokenTests):
"""A class for testing Web SSO."""
SSO_URL = '/auth/OS-FEDERATION/websso/'
SSO_TEMPLATE_NAME = 'sso_callback_template.html'
SSO_TEMPLATE_PATH = os.path.join(core.dirs.etc(), SSO_TEMPLATE_NAME)
TRUSTED_DASHBOARD = 'http://horizon.com'
ORIGIN = urllib.parse.quote_plus(TRUSTED_DASHBOARD)
PROTOCOL_REMOTE_ID_ATTR = uuid.uuid4().hex
def setUp(self):
super(WebSSOTests, self).setUp()
self.api = federation_controllers.Auth()
def config_overrides(self):
super(WebSSOTests, self).config_overrides()
self.config_fixture.config(
group='federation',
trusted_dashboard=[self.TRUSTED_DASHBOARD],
sso_callback_template=self.SSO_TEMPLATE_PATH,
remote_id_attribute=self.REMOTE_ID_ATTR)
def test_render_callback_template(self):
token_id = uuid.uuid4().hex
resp = self.api.render_html_response(self.TRUSTED_DASHBOARD, token_id)
self.assertIn(token_id, resp.body)
self.assertIn(self.TRUSTED_DASHBOARD, resp.body)
def test_federated_sso_auth(self):
environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
context = {'environment': environment}
query_string = {'origin': self.ORIGIN}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
resp = self.api.federated_sso_auth(context, self.PROTOCOL)
self.assertIn(self.TRUSTED_DASHBOARD, resp.body)
def test_get_sso_origin_host_case_insensitive(self):
# test lowercase hostname in trusted_dashboard
context = {
'query_string': {
'origin': "http://horizon.com",
},
}
host = self.api._get_sso_origin_host(context)
self.assertEqual("http://horizon.com", host)
# test uppercase hostname in trusted_dashboard
self.config_fixture.config(group='federation',
trusted_dashboard=['http://Horizon.com'])
host = self.api._get_sso_origin_host(context)
self.assertEqual("http://horizon.com", host)
def test_federated_sso_auth_with_protocol_specific_remote_id(self):
self.config_fixture.config(
group=self.PROTOCOL,
remote_id_attribute=self.PROTOCOL_REMOTE_ID_ATTR)
environment = {self.PROTOCOL_REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
context = {'environment': environment}
query_string = {'origin': self.ORIGIN}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
resp = self.api.federated_sso_auth(context, self.PROTOCOL)
self.assertIn(self.TRUSTED_DASHBOARD, resp.body)
def test_federated_sso_auth_bad_remote_id(self):
environment = {self.REMOTE_ID_ATTR: self.IDP}
context = {'environment': environment}
query_string = {'origin': self.ORIGIN}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
self.assertRaises(exception.IdentityProviderNotFound,
self.api.federated_sso_auth,
context, self.PROTOCOL)
def test_federated_sso_missing_query(self):
environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
context = {'environment': environment}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
self.assertRaises(exception.ValidationError,
self.api.federated_sso_auth,
context, self.PROTOCOL)
def test_federated_sso_missing_query_bad_remote_id(self):
environment = {self.REMOTE_ID_ATTR: self.IDP}
context = {'environment': environment}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
self.assertRaises(exception.ValidationError,
self.api.federated_sso_auth,
context, self.PROTOCOL)
def test_federated_sso_untrusted_dashboard(self):
environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
context = {'environment': environment}
query_string = {'origin': uuid.uuid4().hex}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
self.assertRaises(exception.Unauthorized,
self.api.federated_sso_auth,
context, self.PROTOCOL)
def test_federated_sso_untrusted_dashboard_bad_remote_id(self):
environment = {self.REMOTE_ID_ATTR: self.IDP}
context = {'environment': environment}
query_string = {'origin': uuid.uuid4().hex}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
self.assertRaises(exception.Unauthorized,
self.api.federated_sso_auth,
context, self.PROTOCOL)
def test_federated_sso_missing_remote_id(self):
context = {'environment': {}}
query_string = {'origin': self.ORIGIN}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
self.assertRaises(exception.Unauthorized,
self.api.federated_sso_auth,
context, self.PROTOCOL)
def test_identity_provider_specific_federated_authentication(self):
environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
context = {'environment': environment}
query_string = {'origin': self.ORIGIN}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
resp = self.api.federated_idp_specific_sso_auth(context,
self.idp['id'],
self.PROTOCOL)
self.assertIn(self.TRUSTED_DASHBOARD, resp.body)
class K2KServiceCatalogTests(test_v3.RestfulTestCase):
SP1 = 'SP1'
SP2 = 'SP2'
SP3 = 'SP3'
def setUp(self):
super(K2KServiceCatalogTests, self).setUp()
sp = self.sp_ref()
self.federation_api.create_sp(self.SP1, sp)
self.sp_alpha = {self.SP1: sp}
sp = self.sp_ref()
self.federation_api.create_sp(self.SP2, sp)
self.sp_beta = {self.SP2: sp}
sp = self.sp_ref()
self.federation_api.create_sp(self.SP3, sp)
self.sp_gamma = {self.SP3: sp}
self.token_v3_helper = token_common.V3TokenDataHelper()
def sp_response(self, id, ref):
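        # The service catalog entry only carries id, auth_url and sp_url, so
        # strip the remaining fields from the reference before comparing.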
ref.pop('enabled')
ref.pop('description')
ref.pop('relay_state_prefix')
ref['id'] = id
return ref
def sp_ref(self):
ref = {
'auth_url': uuid.uuid4().hex,
'enabled': True,
'description': uuid.uuid4().hex,
'sp_url': uuid.uuid4().hex,
'relay_state_prefix': CONF.saml.relay_state_prefix,
}
return ref
def _validate_service_providers(self, token, ref):
token_data = token['token']
self.assertIn('service_providers', token_data)
self.assertIsNotNone(token_data['service_providers'])
service_providers = token_data.get('service_providers')
self.assertEqual(len(ref), len(service_providers))
for entity in service_providers:
id = entity.get('id')
ref_entity = self.sp_response(id, ref.get(id))
self.assertDictEqual(entity, ref_entity)
def test_service_providers_in_token(self):
"""Check if service providers are listed in service catalog."""
token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
ref = {}
for r in (self.sp_alpha, self.sp_beta, self.sp_gamma):
ref.update(r)
self._validate_service_providers(token, ref)
def test_service_provides_in_token_disabled_sp(self):
"""Test behaviour with disabled service providers.
Disabled service providers should not be listed in the service
catalog.
"""
# disable service provider ALPHA
sp_ref = {'enabled': False}
self.federation_api.update_sp(self.SP1, sp_ref)
token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
ref = {}
for r in (self.sp_beta, self.sp_gamma):
ref.update(r)
self._validate_service_providers(token, ref)
def test_no_service_providers_in_token(self):
"""Test service catalog with disabled service providers.
There should be no entry ``service_providers`` in the catalog.
        The test passes provided no ``service_providers`` entry is present.
"""
sp_ref = {'enabled': False}
for sp in (self.SP1, self.SP2, self.SP3):
self.federation_api.update_sp(sp, sp_ref)
token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
self.assertNotIn('service_providers', token['token'],
message=('Expected Service Catalog not to have '
'service_providers'))
| apache-2.0 | -3,845,853,793,140,106,000 | 39.458141 | 79 | 0.530533 | false |
awlange/brainsparks | src/calrissian/regularization/particle_regularize_l2.py | 1 | 5014 | import numpy as np
class ParticleRegularizeL2(object):
"""
L2 regularizer for charges
"""
def __init__(self, coeff_lambda=0.0, zeta=8.0):
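        # coeff_lambda scales the penalty; zeta is the exponent of the
        # (currently commented-out) inter-particle repulsion terms below.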
self.coeff_lambda = coeff_lambda
self.zeta = zeta
self.n = 1
def cost(self, particle_input, layers):
c = 0.0
# c = np.sum(particle_input.q * particle_input.q)
# # c = np.sum(particle_input.rx * particle_input.rx + particle_input.ry * particle_input.ry + particle_input.rz * particle_input.rz)
# for layer in layers:
# # c += np.sum(layer.q * layer.q) + np.sum(layer.b * layer.b)
# # c += np.sum(layer.q * layer.q)
# # c += np.sum(layer.rx * layer.rx + layer.ry * layer.ry + layer.rz * layer.rz)
#
# # Layer inter-particle repulsion
# for i in range(layer.output_size):
# rx_i = layer.rx[i]
# ry_i = layer.ry[i]
# rz_i = layer.rz[i]
# for j in range(i+1, layer.output_size):
# dx = layer.rx[j] - rx_i
# dy = layer.ry[j] - ry_i
# dz = layer.rz[j] - rz_i
# d2 = dx*dx + dy*dy + dz*dz
# c += np.exp(-self.zeta * d2)
#
# n = layer.output_size
# c /= (n * (n-1)) / 2
# # Input layer inter-particle repulsion
# for i in range(particle_input.output_size):
# rx_i = particle_input.rx[i]
# ry_i = particle_input.ry[i]
# rz_i = particle_input.rz[i]
# for j in range(i+1, particle_input.output_size):
# dx = particle_input.rx[j] - rx_i
# dy = particle_input.ry[j] - ry_i
# dz = particle_input.rz[j] - rz_i
# d2 = dx*dx + dy*dy + dz*dz
# c += np.exp(-self.zeta * d2)
# c /= n
# Compute the matrices
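        # Active term: accumulate each layer's mean squared weight, computed
        # from particle positions that chain from one layer to the next.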
r = particle_input.get_rxyz()
for i, layer in enumerate(layers):
w = layer.compute_w(r)
# c += np.sum(w * w)
c += np.mean(w * w)
r = layer.get_rxyz()
return self.coeff_lambda * c
def cost_gradient(self, particle_input, layers, dc_dq, dc_db, dc_dr):
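        # All regularization gradient terms below are commented out, so the
        # incoming gradients are returned unchanged.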
# dc_dr_x = dc_dr[0]
# dc_dr_y = dc_dr[1]
# dc_dr_z = dc_dr[2]
#
# two_lambda = 2.0 * self.coeff_lambda
#
# # # dc_dq[0] += two_lambda * particle_input.q
# # # dc_dr_x[0] += two_lambda * particle_input.rx
# # # dc_dr_y[0] += two_lambda * particle_input.ry
# # # dc_dr_z[0] += two_lambda * particle_input.rz
# for l, layer in enumerate(layers):
# # dc_dq[l] += two_lambda * layer.q
# # dc_db[l] += two_lambda * layer.b
# # dc_dr_x[l+1] += two_lambda * layer.rx
# # dc_dr_y[l+1] += two_lambda * layer.ry
# # dc_dr_z[l+1] += two_lambda * layer.rz
#
# n = layer.output_size
# n = (n * (n-1)) / 2
# for i in range(layer.output_size):
# rx_i = layer.rx[i]
# ry_i = layer.ry[i]
# rz_i = layer.rz[i]
# for j in range(i+1, layer.output_size):
# dx = layer.rx[j] - rx_i
# dy = layer.ry[j] - ry_i
# dz = layer.rz[j] - rz_i
# d2 = dx*dx + dy*dy + dz*dz
# # tmp = two_lambda * self.zeta * np.exp(-self.zeta * d2)
# tmp = two_lambda * self.zeta * np.exp(-self.zeta * d2) / n
# tx = tmp * dx
# ty = tmp * dy
# tz = tmp * dz
#
# dc_dr_x[l+1][i] += tx
# dc_dr_y[l+1][i] += ty
# dc_dr_z[l+1][i] += tz
# dc_dr_x[l+1][j] -= tx
# dc_dr_y[l+1][j] -= ty
# dc_dr_z[l+1][j] -= tz
# #
# # # for i in range(particle_input.output_size):
# # # rx_i = particle_input.rx[i]
# # # ry_i = particle_input.ry[i]
# # # rz_i = particle_input.rz[i]
# # # for j in range(i+1, particle_input.output_size):
# # # dx = particle_input.rx[j] - rx_i
# # # dy = particle_input.ry[j] - ry_i
# # # dz = particle_input.rz[j] - rz_i
# # # d2 = dx*dx + dy*dy + dz*dz
# # # tmp = two_lambda * self.zeta * np.exp(-self.zeta * d2)
# # # tx = tmp * dx
# # # ty = tmp * dy
# # # tz = tmp * dz
# # #
# # # dc_dr_x[0][i] += tx
# # # dc_dr_y[0][i] += ty
# # # dc_dr_z[0][i] += tz
# # # dc_dr_x[0][j] -= tx
# # # dc_dr_y[0][j] -= ty
# # # dc_dr_z[0][j] -= tz
#
# dc_dr = (dc_dr_x, dc_dr_y, dc_dr_z)
return dc_dq, dc_db, dc_dr
| mit | 2,553,449,654,175,343,000 | 36.984848 | 141 | 0.406063 | false |
ricomoss/python-april-2014 | class3/script.py | 1 | 2799 | #!/usr/bin/env python
def get_highest_test_score(test_scores):
highest_score = 0
name = None
for key, val in test_scores.items():
if val > highest_score:
highest_score = val
name = key
return name, highest_score
def exercise1():
test_scores = {
'james': 75,
'karen': 78,
'albert': 92,
'kim': 66,
'susan': 90,
'rick': 88,
}
name, score = get_highest_test_score(test_scores)
print('{} scored {}'.format(name, score))
def get_avg_test_info(test_scores):
scores_tot = 0
for val in test_scores.values():
scores_tot += val
avg = scores_tot / len(test_scores)
names = list()
for key, val in test_scores.items():
if val > avg:
names.append(key)
return avg, names
def exercise2():
test_scores = {
'james': 75,
'karen': 78,
'albert': 92,
'kim': 66,
'susan': 90,
'rick': 88,
}
avg, names = get_avg_test_info(test_scores)
print('The average score was {} and the following people scored '
'above average:'.format(avg))
for name in names:
print('\t{}'.format(name))
def get_fib_list(num):
fib_list = list()
for index in range(1, num + 1):
fib_list.append(get_fib_num(index))
return fib_list
def get_fib_num(n):
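    # Naive recursive Fibonacci (1-indexed); runtime grows exponentially with
    # n, so the larger indices used in exercise4 are slow.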
if n < 1:
raise IndexError('A sequence must have a positive integer index.')
elif n == 1:
return 0
elif n == 2:
return 1
else:
return get_fib_num(n - 1) + get_fib_num(n - 2)
def exercise3():
fib_list = get_fib_list(15)
print(fib_list)
def exercise4():
my_dict = dict()
for index in range(1, 35):
my_dict[index] = get_fib_list(index)
pretty_print(my_dict)
# Advanced Exercise 1
def pretty_print(obj, indents=0):
if isinstance(obj, (list, tuple)):
opening = '{}['
closing = '{}]'
if isinstance(obj, tuple):
opening = '{}('
closing = '{})'
print(opening.format(' ' * 4 * indents))
for item in obj:
            pretty_print(item, indents + 1)
print(closing.format(' ' * 4 * indents))
elif isinstance(obj, dict):
print('{}{{'.format(' ' * 4 * indents))
for key, val in obj.items():
            if isinstance(val, dict):
                # The recursive call prints the nested dict's own braces; a
                # bare '{' here would also raise ValueError in str.format().
                print('{}{}:'.format(' ' * 4 * (indents + 1), key))
                pretty_print(val, indents + 1)
else:
print('{}{}: {}'.format(' ' * 4 * (indents + 1), key, val))
closing = '{}}},'
if indents == 0:
closing = '{}}}'
print(closing.format(' ' * 4 * indents))
if __name__ == '__main__':
exercise1()
exercise2()
exercise3()
exercise4()
| mit | 1,735,881,688,317,963,000 | 22.521008 | 75 | 0.510539 | false |
NathanW2/qmap | src/qmap/floatingtoolbar.py | 1 | 1226 | from PyQt4.QtGui import QToolBar, QActionGroup
from PyQt4.QtCore import Qt, QPoint
from utils import log
class FloatingToolBar(QToolBar):
"""
A floating QToolBar with no border and is offset under its parent
"""
def __init__(self, name, parent):
"""
parent: The parent of this toolbar. Should be another toolbar
"""
QToolBar.__init__(self,name, parent)
self.setMovable(False)
self.setWindowFlags(Qt.Tool | Qt.FramelessWindowHint | Qt.X11BypassWindowManagerHint)
self.setAllowedAreas(Qt.NoToolBarArea)
self.actiongroup = QActionGroup(self)
def addToActionGroup(self, action):
self.actiongroup.addAction(action)
def showToolbar(self, parentaction, defaultaction, toggled):
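        # When toggled on, optionally activate the default tool and place the
        # toolbar directly below the widget of the triggering action; when
        # toggled off, untoggle the checked action and hide the toolbar.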
if toggled:
self.show()
if defaultaction:
defaultaction.toggle()
widget = self.parent().widgetForAction(parentaction)
x = self.parent().mapToGlobal(widget.pos()).x()
y = self.parent().mapToGlobal(widget.pos()).y()
newpoint = QPoint(x, y + self.parent().rect().height())
# if self.orientation() == Qt.Vertical:
# newpoint = QPoint(x, y + self.parent().rect().width())
self.move(newpoint)
else:
action = self.actiongroup.checkedAction()
if action:
action.toggle()
self.hide()
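# Usage sketch (illustrative only; "maintoolbar", "parentaction" and
# "defaultaction" are assumed objects from the calling code, not part of
# this module):
#   subbar = FloatingToolBar("Editing", maintoolbar)
#   subbar.addToActionGroup(defaultaction)
#   parentaction.toggled.connect(
#       lambda checked: subbar.showToolbar(parentaction, defaultaction, checked))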
| gpl-2.0 | 2,753,613,497,718,192,000 | 31.263158 | 87 | 0.709625 | false |
willrp/willbuyer | backend/tests/integration/controller/api/cart/test_select_all_controller.py | 1 | 1098 | import pytest
from flask import json
from json.decoder import JSONDecodeError
from backend.util.response.cart import CartSchema
def test_select_all_controller(flask_app, es_create):
prod_list = es_create("products", 2)
item_id = prod_list[0].meta["id"]
item_id_2 = prod_list[1].meta["id"]
with flask_app.test_client() as client:
response = client.get(
"api/cart"
)
with pytest.raises(JSONDecodeError):
json.loads(response.data)
assert response.status_code == 204
with client.session_transaction() as sess:
assert "cart" not in sess
sess["cart"] = {item_id: 1, item_id_2: 2}
assert sess["cart"][item_id] == 1
assert sess["cart"][item_id_2] == 2
response = client.get(
"api/cart"
)
data = json.loads(response.data)
CartSchema().load(data)
assert response.status_code == 200
for item in data["products"]:
assert item["id"] in [item_id, item_id_2]
assert item["amount"] in [1, 2]
| mit | -7,319,561,115,062,300,000 | 27.153846 | 53 | 0.577413 | false |
OpenSoccerManager/opensoccermanager-editor | structures/skills.py | 1 | 1651 | #!/usr/bin/env python3
# This file is part of OpenSoccerManager-Editor.
#
# OpenSoccerManager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# OpenSoccerManager is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# OpenSoccerManager. If not, see <http://www.gnu.org/licenses/>.
class Skills:
def __init__(self):
self.skills = (("Keeping", "KP"),
("Tackling", "TK"),
("Passing", "PS"),
("Shooting", "SH"),
("Heading", "HD"),
("Pace", "PC"),
("Stamina", "ST"),
("Ball Control", "BC"),
("Set Pieces", "SP"))
def get_short_skills(self):
'''
Return list of shortened skill names.
'''
skills = [skill[1] for skill in self.skills]
return skills
def get_skills(self):
'''
Return list of full skill names.
'''
skills = [skill[0] for skill in self.skills]
return skills
def get_skill_by_index(self, index):
'''
Return skill tuple for given index.
'''
return self.skills[index]
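# Minimal usage sketch (assumed calling pattern, not part of the editor code):
#   skills = Skills()
#   skills.get_skills() # ['Keeping', 'Tackling', ..., 'Set Pieces']
#   skills.get_short_skills() # ['KP', 'TK', ..., 'SP']
#   skills.get_skill_by_index(0) # ('Keeping', 'KP')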
| gpl-3.0 | -8,273,877,856,923,382,000 | 31.372549 | 79 | 0.566929 | false |
philgyford/django-ditto | ditto/pinboard/admin.py | 1 | 2602 | from django.contrib import admin
from django.db import models
from django.forms import Textarea, TextInput
from taggit.managers import TaggableManager
from taggit.forms import TagWidget
from ..core.admin import DittoItemModelAdmin
from .models import Account, Bookmark
@admin.register(Account)
class AccountAdmin(admin.ModelAdmin):
list_display = (
"username",
"is_active",
"time_created",
"time_modified",
)
fieldsets = (
(None, {"fields": ("username", "url", "is_active",)}),
(
"API",
{
"fields": ("api_token",),
"description": (
"Your API Token can be found at "
'<a href="https://pinboard.in/settings/password">'
"pinboard.in/settings/password</a>"
),
},
),
("Data", {"fields": ("time_created", "time_modified",)}),
)
readonly_fields = (
"time_created",
"time_modified",
)
@admin.register(Bookmark)
class BookmarkAdmin(DittoItemModelAdmin):
list_display = (
"title",
"post_time",
"account",
)
list_filter = (
"post_time",
"is_private",
"to_read",
"account",
)
fieldsets = (
(
None,
{
"fields": (
"account",
"title",
"url",
"description",
"summary",
"tags",
"post_time",
"post_year_str",
"url_hash",
)
},
),
("Options", {"fields": ("is_private", "to_read",)}),
("Data", {"fields": ("raw", "fetch_time", "time_created", "time_modified",)}),
)
formfield_overrides = {
# Make the inputs full-width.
models.CharField: {"widget": TextInput(attrs={"class": "vLargeTextField"})},
# Reduce the number of rows; similar to Pinboard's description field.
models.TextField: {
"widget": Textarea(
attrs={"class": "vLargeTextField", "cols": 40, "rows": 4}
)
},
# Make the input full-width.
TaggableManager: {"widget": TagWidget(attrs={"class": "vLargeTextField"})},
}
readonly_fields = (
"post_year_str",
"raw",
"fetch_time",
"time_created",
"time_modified",
)
search_fields = (
"title",
"url",
"description",
)
| mit | 3,542,037,757,899,506,000 | 24.762376 | 86 | 0.468486 | false |
xrubio/abm4s3 | python/experimentTransmission.py | 1 | 2038 | #
# Copyright (c) 2015 - Xavier Rubio-Campillo
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version
#
# The source code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/python3
import transmission, random
def singleRun():
params = transmission.Params()
params.transmissionType = 'prestige'
params.nAgents = 30
params.nSteps = 100
params.output = 'output.csv'
params.oneFile = False
transmission.run(params)
def experiment():
numRuns = 100
transmissionTypeSweep = ['vertical','encounter','prestige','conformist']
params = transmission.Params()
params.xDim = 10
params.yDim = 10
params.replacementRate = 0.1
params.moveDistance = 1.0
params.interactionRadius = 1.0
params.innovationRate = 0.01
params.nTraits = 3
params.nTraitRange = 5
params.prestigeIndex = 1
params.nSteps = 1000
params.storeAllSteps = True
params.oneFile = False
totalRuns = 0
    # perform numRuns of each type, randomly sampling nAgents from 50 to 500
for i in transmissionTypeSweep:
for j in range(0, numRuns):
print('run:',totalRuns+1,'of:',numRuns*len(transmissionTypeSweep))
params.numRun = totalRuns
params.transmissionType = i
params.nAgents = random.randint(50,500)
params.output = 'output_tr_'+str(params.numRun)+'.csv'
totalRuns += 1
transmission.run(params)
def main():
    # run a single simulation by default; switch to experiment() for the full sweep
    singleRun()
if __name__ == "__main__":
main()
| gpl-3.0 | 6,615,233,437,712,181,000 | 30.353846 | 79 | 0.675662 | false |
SCM-NV/qmworks-namd | nanoqm/workflows/workflow_coupling.py | 1 | 2751 | """Workflow to compute the derivate coupling between states.
The ``workflow_derivative_couplings`` expected a file with a trajectory-like
file with the molecular geometries to compute the couplings.
Index
-----
.. currentmodule:: nanoqm.workflows.workflow_coupling
.. autosummary::
"""
__all__ = ['workflow_derivative_couplings']
import logging
import os
import shutil
from os.path import join
from pathlib import Path
from typing import List, Tuple
from noodles import gather, schedule, unpack
from qmflows import run
from qmflows.type_hints import PathLike
from ..common import DictConfig
from ..schedule.components import calculate_mos
from ..schedule.scheduleCoupling import (calculate_overlap, lazy_couplings,
write_hamiltonians)
from .initialization import initialize
# Starting logger
logger = logging.getLogger(__name__)
def workflow_derivative_couplings(config: DictConfig) -> Tuple[List[PathLike], List[PathLike]]:
"""Compute the derivative couplings for a molecular dynamic trajectory."""
# Dictionary containing the general configuration
config.update(initialize(config))
logger.info("starting couplings calculation!")
# compute the molecular orbitals
mo_paths_hdf5, energy_paths_hdf5 = unpack(calculate_mos(config), 2)
# mo_paths_hdf5 = run(calculate_mos(config), folder=config.workdir)
# Overlap matrix at two different times
promised_overlaps = calculate_overlap(config, mo_paths_hdf5)
# Calculate Non-Adiabatic Coupling
promised_crossing_and_couplings = lazy_couplings(config, promised_overlaps)
# Write the results in PYXAID format
config.path_hamiltonians = create_path_hamiltonians(config.workdir)
# Inplace scheduling of write_hamiltonians function.
# Equivalent to add @schedule on top of the function
schedule_write_ham = schedule(write_hamiltonians)
# Number of matrix computed
config["nPoints"] = len(config.geometries) - 2
# Write Hamilotians in PYXAID format
promise_files = schedule_write_ham(
config, promised_crossing_and_couplings, mo_paths_hdf5)
results = run(
gather(promise_files, energy_paths_hdf5), folder=config.workdir, always_cache=False)
remove_folders(config.folders)
return results
def create_path_hamiltonians(workdir: PathLike) -> PathLike:
"""Create the Paths to store the resulting hamiltonians."""
path_hamiltonians = join(workdir, 'hamiltonians')
if not os.path.exists(path_hamiltonians):
os.makedirs(path_hamiltonians)
return path_hamiltonians
def remove_folders(folders: List[PathLike]):
"""Remove unused folders."""
for f in folders:
if Path(f).exists():
shutil.rmtree(f)
| mit | 748,809,113,016,834,600 | 29.566667 | 95 | 0.727008 | false |
topwebmaster/cookiecutter-django | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/taskapp/celery.py | 1 | 2180 | {% if cookiecutter.use_celery == 'y' %}
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local') # pragma: no cover
app = Celery('{{cookiecutter.project_slug}}')
class CeleryAppConfig(AppConfig):
name = '{{cookiecutter.project_slug}}.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
installed_apps = [app_config.name for app_config in apps.get_app_configs()]
app.autodiscover_tasks(lambda: installed_apps, force=True)
{% if cookiecutter.use_sentry == 'y' -%}
if hasattr(settings, 'RAVEN_CONFIG'):
# Celery signal registration
{% if cookiecutter.use_pycharm == 'y' -%}
# Since raven is required in production only,
# imports might (most surely will) be wiped out
# during PyCharm code clean up started
# in other environments.
# @formatter:off
{%- endif %}
from raven import Client as RavenClient
from raven.contrib.celery import register_signal as raven_register_signal
from raven.contrib.celery import register_logger_signal as raven_register_logger_signal
{% if cookiecutter.use_pycharm == 'y' -%}
# @formatter:on
{%- endif %}
raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['dsn'])
raven_register_logger_signal(raven_client)
raven_register_signal(raven_client)
{%- endif %}
@app.task(bind=True)
def debug_task(self):
print(f'Request: {self.request!r}') # pragma: no cover
{% else %}
# Use this as a starting point for your project with celery.
# If you are not using celery, you can remove this app
{% endif -%}
| bsd-3-clause | 2,726,470,006,636,422,000 | 36.586207 | 99 | 0.658257 | false |
zawata/AccessLog | dependencies/miscFunc.py | 1 | 1108 | '''
A file of small misc. functions
'''
import socket
import subprocess
import httplib2
class Singleton(type):
'''
    Singleton Design Pattern metaclass
'''
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
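# Minimal usage sketch for the metaclass above (Config is a hypothetical
# example class, not something defined in this project; Python 3 syntax):
#   class Config(metaclass=Singleton):
#       def __init__(self):
#           self.values = {}
#   assert Config() is Config() # both calls return the same instance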
def testRoot():
'''
Test Root Access
'''
#run a simple command as root and check if we need a password
p = subprocess.Popen(
'sudo -n echo',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
retval = (p.stdout.readlines()[0].find("sudo: a password is required") == -1)
p.wait()
return retval
def testInternet():
'''
Check Internet Connection
'''
# attempt a connection to google and report success or not
conn = httplib2.HTTPConnectionWithTimeout("www.google.com", timeout=None)
try:
conn.request("HEAD", "/")
return True
except socket.gaierror:
return False
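# Illustrative call site (assumed usage, not part of this module):
#   if not testInternet():
#       print("No internet connection detected")
#   if not testRoot():
#       print("Passwordless sudo is not available")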
| gpl-3.0 | -1,916,738,916,484,150,300 | 22.083333 | 81 | 0.609206 | false |
cloudbase/maas | src/maastesting/tests/test_package.py | 1 | 1985 | # Copyright 2013 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for the `maastesting` package."""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = []
from os.path import splitext
from warnings import (
catch_warnings,
warn,
)
import maastesting
from maastesting.testcase import MAASTestCase
from testtools.matchers import (
Equals,
IsInstance,
MatchesAll,
MatchesListwise,
MatchesStructure,
StartsWith,
)
class TestWarnings(MAASTestCase):
scenarios = sorted(
(package_name, dict(package_name=package_name))
for package_name in maastesting.packages
)
def test_pattern_matches_package(self):
self.assertRegexpMatches(
self.package_name, maastesting.packages_expr)
def test_pattern_matches_subpackage(self):
self.assertRegexpMatches(
self.package_name + ".foo", maastesting.packages_expr)
def assertWarningsEnabled(self, category):
message = "%s from %s" % (category.__name__, self.package_name)
filename, ext = splitext(__file__)
with catch_warnings(record=True) as log:
warn(message, category=category)
self.assertThat(log, MatchesListwise([
MatchesStructure(
message=MatchesAll(
IsInstance(category),
MatchesStructure.byEquality(args=(message,)),
),
category=Equals(category),
filename=StartsWith(filename),
),
]))
def test_BytesWarning_enabled(self):
self.assertWarningsEnabled(BytesWarning)
def test_DeprecationWarning_enabled(self):
self.assertWarningsEnabled(DeprecationWarning)
def test_ImportWarning_enabled(self):
self.assertWarningsEnabled(ImportWarning)
| agpl-3.0 | 5,368,649,101,309,266,000 | 26.191781 | 71 | 0.644332 | false |
dyoussef/s2p | s2plib/rectification.py | 1 | 16836 | # Copyright (C) 2015, Carlo de Franchis <[email protected]>
# Copyright (C) 2015, Gabriele Facciolo <[email protected]>
# Copyright (C) 2015, Enric Meinhardt <[email protected]>
from __future__ import print_function
import os
import numpy as np
from s2plib import rpc_model
from s2plib import rpc_utils
from s2plib import estimation
from s2plib import evaluation
from s2plib import common
from s2plib import visualisation
from s2plib import block_matching
from s2plib.config import cfg
def filter_matches_epipolar_constraint(F, matches, thresh):
"""
Discards matches that are not consistent with the epipolar constraint.
Args:
F: fundamental matrix
matches: list of pairs of 2D points, stored as a Nx4 numpy array
thresh: maximum accepted distance between a point and its matched
epipolar line
Returns:
the list of matches that satisfy the constraint. It is a sub-list of
the input list.
"""
out = []
for match in matches:
x = np.array([match[0], match[1], 1])
xx = np.array([match[2], match[3], 1])
d1 = evaluation.distance_point_to_line(x, np.dot(F.T, xx))
d2 = evaluation.distance_point_to_line(xx, np.dot(F, x))
if max(d1, d2) < thresh:
out.append(match)
return np.array(out)
def register_horizontally_shear(matches, H1, H2):
"""
Adjust rectifying homographies with tilt, shear and translation to reduce the disparity range.
Args:
matches: list of pairs of 2D points, stored as a Nx4 numpy array
H1, H2: two homographies, stored as numpy 3x3 matrices
Returns:
H2: corrected homography H2
The matches are provided in the original images coordinate system. By
transforming these coordinates with the provided homographies, we obtain
matches whose disparity is only along the x-axis.
"""
# transform the matches according to the homographies
p1 = common.points_apply_homography(H1, matches[:, :2])
x1 = p1[:, 0]
y1 = p1[:, 1]
p2 = common.points_apply_homography(H2, matches[:, 2:])
x2 = p2[:, 0]
y2 = p2[:, 1]
if cfg['debug']:
print("Residual vertical disparities: max, min, mean. Should be zero")
print(np.max(y2 - y1), np.min(y2 - y1), np.mean(y2 - y1))
# we search the (a, b, c) vector that minimises \sum (x1 - (a*x2+b*y2+c))^2
# it is a least squares minimisation problem
A = np.vstack((x2, y2, y2*0+1)).T
a, b, c = np.linalg.lstsq(A, x1)[0].flatten()
# correct H2 with the estimated tilt, shear and translation
return np.dot(np.array([[a, b, c], [0, 1, 0], [0, 0, 1]]), H2)
def register_horizontally_translation(matches, H1, H2, flag='center'):
"""
Adjust rectifying homographies with a translation to modify the disparity range.
Args:
matches: list of pairs of 2D points, stored as a Nx4 numpy array
H1, H2: two homographies, stored as numpy 3x3 matrices
flag: option needed to control how to modify the disparity range:
'center': move the barycenter of disparities of matches to zero
'positive': make all the disparities positive
'negative': make all the disparities negative. Required for
Hirshmuller stereo (java)
Returns:
H2: corrected homography H2
The matches are provided in the original images coordinate system. By
transforming these coordinates with the provided homographies, we obtain
matches whose disparity is only along the x-axis. The second homography H2
is corrected with a horizontal translation to obtain the desired property
on the disparity range.
"""
# transform the matches according to the homographies
p1 = common.points_apply_homography(H1, matches[:, :2])
x1 = p1[:, 0]
y1 = p1[:, 1]
p2 = common.points_apply_homography(H2, matches[:, 2:])
x2 = p2[:, 0]
y2 = p2[:, 1]
# for debug, print the vertical disparities. Should be zero.
if cfg['debug']:
print("Residual vertical disparities: max, min, mean. Should be zero")
print(np.max(y2 - y1), np.min(y2 - y1), np.mean(y2 - y1))
# compute the disparity offset according to selected option
t = 0
if (flag == 'center'):
t = np.mean(x2 - x1)
if (flag == 'positive'):
t = np.min(x2 - x1)
if (flag == 'negative'):
t = np.max(x2 - x1)
# correct H2 with a translation
return np.dot(common.matrix_translation(-t, 0), H2)
def disparity_range_from_matches(matches, H1, H2, w, h):
"""
Compute the disparity range of a ROI from a list of point matches.
The estimation is based on the extrapolation of the affine registration
estimated from the matches. The extrapolation is done on the whole region of
interest.
Args:
matches: Nx4 numpy array containing a list of matches, in the full
image coordinates frame, before rectification
w, h: width and height of the rectangular ROI in the first image.
H1, H2: two rectifying homographies, stored as numpy 3x3 matrices
Returns:
disp_min, disp_max: horizontal disparity range
"""
# transform the matches according to the homographies
p1 = common.points_apply_homography(H1, matches[:, :2])
x1 = p1[:, 0]
p2 = common.points_apply_homography(H2, matches[:, 2:])
x2 = p2[:, 0]
y2 = p2[:, 1]
# compute the final disparity range
disp_min = np.floor(np.min(x2 - x1))
disp_max = np.ceil(np.max(x2 - x1))
# add a security margin to the disparity range
disp_min *= (1 - np.sign(disp_min) * cfg['disp_range_extra_margin'])
disp_max *= (1 + np.sign(disp_max) * cfg['disp_range_extra_margin'])
return disp_min, disp_max
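# Quick sanity sketch (synthetic matches, identity homographies; the exact
# numbers also depend on cfg['disp_range_extra_margin']):
#   m = np.array([[0., 0., 5., 0.], [10., 0., 18., 0.]])
#   H = np.eye(3)
#   disparity_range_from_matches(m, H, H, 100, 100) # about (5, 8) plus margins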
def disparity_range(rpc1, rpc2, x, y, w, h, H1, H2, matches, A=None):
"""
Compute the disparity range of a ROI from a list of point matches.
The estimation is based on the extrapolation of the affine registration
estimated from the matches. The extrapolation is done on the whole region of
interest.
Args:
rpc1, rpc2: two instances of the rpc_model.RPCModel class
x, y, w, h: four integers defining the rectangular ROI in the first
image. (x, y) is the top-left corner, and (w, h) are the dimensions
of the rectangle.
H1, H2: two rectifying homographies, stored as numpy 3x3 matrices
matches: Nx4 numpy array containing a list of sift matches, in the full
image coordinates frame
A (optional): 3x3 numpy array containing the pointing error correction
for im2. This matrix is usually estimated with the pointing_accuracy
module.
Returns:
disp: 2-uple containing the horizontal disparity range
"""
# Default disparity range to return if everything else breaks
disp = (-3,3)
exogenous_disp = None
sift_disp = None
alt_disp = None
# Compute exogenous disparity range if needed
if (cfg['disp_range_method'] in ['exogenous', 'wider_sift_exogenous']):
exogenous_disp = rpc_utils.exogenous_disp_range_estimation(rpc1, rpc2, x, y, w, h,
H1, H2, A,
cfg['disp_range_exogenous_high_margin'],
cfg['disp_range_exogenous_low_margin'])
print("exogenous disparity range: [%f, %f]" % (exogenous_disp[0], exogenous_disp[1]))
# Compute SIFT disparity range if needed
if (cfg['disp_range_method'] in ['sift', 'wider_sift_exogenous']):
if matches is not None and len(matches)>=2:
sift_disp = disparity_range_from_matches(matches, H1, H2, w, h)
print("SIFT disparity range: [%f, %f]" % (sift_disp[0], sift_disp[1]))
else:
print("No SIFT available, SIFT disparity can not be estimated")
# Compute altitude range disparity if needed
if cfg['disp_range_method'] == 'fixed_altitude_range':
if cfg['alt_min'] is not None and cfg['alt_max'] is not None:
alt_disp = rpc_utils.altitude_range_to_disp_range(cfg['alt_min'],
cfg['alt_max'],
rpc1, rpc2,
x, y, w, h,
H1, H2, A)
print("Altitude fixed disparity range: [%f, %f]" % (alt_disp[0], alt_disp[1]))
# Now, compute disparity range according to selected method
if cfg['disp_range_method'] == 'exogenous':
if exogenous_disp is not None:
disp = exogenous_disp
elif cfg['disp_range_method'] == 'sift':
if sift_disp is not None:
disp = sift_disp
elif cfg['disp_range_method'] == 'wider_sift_exogenous':
if sift_disp is not None and exogenous_disp is not None:
disp = min(exogenous_disp[0], sift_disp[0]), max(exogenous_disp[1], sift_disp[1])
else:
if sift_disp is not None:
disp = sift_disp
else:
disp = exogenous_disp
elif cfg['disp_range_method'] == 'fixed_pixel_range':
if cfg['disp_min'] is not None and cfg['disp_max'] is not None:
disp = cfg['disp_min'], cfg['disp_max']
    elif cfg['disp_range_method'] == 'fixed_altitude_range':
        if alt_disp is not None:
            disp = alt_disp
# impose a minimal disparity range (TODO this is valid only with the
# 'center' flag for register_horizontally_translation)
    disp = min(-3, disp[0]), max(3, disp[1])
print("Final disparity range: [%f, %f]" % (disp[0], disp[1]))
return disp
def rectification_homographies(matches, x, y, w, h):
"""
Computes rectifying homographies from point matches for a given ROI.
The affine fundamental matrix F is estimated with the gold-standard
algorithm, then two rectifying similarities (rotation, zoom, translation)
are computed directly from F.
Args:
matches: numpy array of shape (n, 4) containing a list of 2D point
correspondences between the two images.
x, y, w, h: four integers defining the rectangular ROI in the first
image. (x, y) is the top-left corner, and (w, h) are the dimensions
of the rectangle.
Returns:
S1, S2, F: three numpy arrays of shape (3, 3) representing the
two rectifying similarities to be applied to the two images and the
corresponding affine fundamental matrix.
"""
# estimate the affine fundamental matrix with the Gold standard algorithm
F = estimation.affine_fundamental_matrix(matches)
# compute rectifying similarities
S1, S2 = estimation.rectifying_similarities_from_affine_fundamental_matrix(F, cfg['debug'])
if cfg['debug']:
y1 = common.points_apply_homography(S1, matches[:, :2])[:, 1]
y2 = common.points_apply_homography(S2, matches[:, 2:])[:, 1]
err = np.abs(y1 - y2)
print("max, min, mean rectification error on point matches: ", end=' ')
print(np.max(err), np.min(err), np.mean(err))
# pull back top-left corner of the ROI to the origin (plus margin)
pts = common.points_apply_homography(S1, [[x, y], [x+w, y], [x+w, y+h], [x, y+h]])
x0, y0 = common.bounding_box2D(pts)[:2]
T = common.matrix_translation(-x0, -y0)
return np.dot(T, S1), np.dot(T, S2), F
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None,
sift_matches=None, method='rpc', hmargin=0, vmargin=0):
"""
Rectify a ROI in a pair of images.
Args:
im1, im2: paths to two image files
rpc1, rpc2: paths to the two xml files containing RPC data
x, y, w, h: four integers defining the rectangular ROI in the first
image. (x, y) is the top-left corner, and (w, h) are the dimensions
of the rectangle.
out1, out2: paths to the output rectified crops
A (optional): 3x3 numpy array containing the pointing error correction
for im2. This matrix is usually estimated with the pointing_accuracy
module.
sift_matches (optional): Nx4 numpy array containing a list of sift
matches, in the full image coordinates frame
        method (default: 'rpc'): option to decide whether to use rpc or sift
matches for the fundamental matrix estimation.
{h,v}margin (optional): horizontal and vertical margins added on the
sides of the rectified images
Returns:
H1, H2: Two 3x3 matrices representing the rectifying homographies that
have been applied to the two original (large) images.
disp_min, disp_max: horizontal disparity range
"""
# read RPC data
rpc1 = rpc_model.RPCModel(rpc1)
rpc2 = rpc_model.RPCModel(rpc2)
# compute real or virtual matches
if method == 'rpc':
# find virtual matches from RPC camera models
matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h,
cfg['n_gcp_per_axis'])
# correct second image coordinates with the pointing correction matrix
if A is not None:
matches[:, 2:] = common.points_apply_homography(np.linalg.inv(A),
matches[:, 2:])
else:
matches = sift_matches
# compute rectifying homographies
H1, H2, F = rectification_homographies(matches, x, y, w, h)
if cfg['register_with_shear']:
# compose H2 with a horizontal shear to reduce the disparity range
a = np.mean(rpc_utils.altitude_range(rpc1, x, y, w, h))
lon, lat, alt = rpc_utils.ground_control_points(rpc1, x, y, w, h, a, a, 4)
x1, y1 = rpc1.inverse_estimate(lon, lat, alt)[:2]
x2, y2 = rpc2.inverse_estimate(lon, lat, alt)[:2]
m = np.vstack([x1, y1, x2, y2]).T
m = np.vstack({tuple(row) for row in m}) # remove duplicates due to no alt range
H2 = register_horizontally_shear(m, H1, H2)
# compose H2 with a horizontal translation to center disp range around 0
if sift_matches is not None:
sift_matches = filter_matches_epipolar_constraint(F, sift_matches,
cfg['epipolar_thresh'])
if len(sift_matches) < 10:
print('WARNING: no registration with less than 10 matches')
else:
H2 = register_horizontally_translation(sift_matches, H1, H2)
# compute disparity range
if cfg['debug']:
out_dir = os.path.dirname(out1)
np.savetxt(os.path.join(out_dir, 'sift_matches_disp.txt'),
sift_matches, fmt='%9.3f')
visualisation.plot_matches(im1, im2, rpc1, rpc2, sift_matches, x, y, w, h,
os.path.join(out_dir, 'sift_matches_disp.png'))
disp_m, disp_M = disparity_range(rpc1, rpc2, x, y, w, h, H1, H2,
sift_matches, A)
# recompute hmargin and homographies
hmargin = int(np.ceil(max([hmargin, np.fabs(disp_m), np.fabs(disp_M)])))
T = common.matrix_translation(hmargin, vmargin)
H1, H2 = np.dot(T, H1), np.dot(T, H2)
# compute rectifying homographies for non-epipolar mode (rectify the secondary tile only)
if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']):
H1_inv = np.linalg.inv(H1)
H1 = np.eye(3) # H1 is replaced by 2-D array with ones on the diagonal and zeros elsewhere
H2 = np.dot(H1_inv,H2)
T = common.matrix_translation(-x + hmargin, -y + vmargin)
H1 = np.dot(T, H1)
H2 = np.dot(T, H2)
# compute output images size
roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
pts1 = common.points_apply_homography(H1, roi)
x0, y0, w0, h0 = common.bounding_box2D(pts1)
# check that the first homography maps the ROI in the positive quadrant
np.testing.assert_allclose(np.round([x0, y0]), [hmargin, vmargin], atol=.01)
# apply homographies and do the crops
common.image_apply_homography(out1, im1, H1, w0 + 2*hmargin, h0 + 2*vmargin)
common.image_apply_homography(out2, im2, H2, w0 + 2*hmargin, h0 + 2*vmargin)
if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']):
pts_in = [[0, 0], [disp_m, 0], [disp_M, 0]]
pts_out = common.points_apply_homography(H1_inv,
pts_in)
disp_m = pts_out[1,:] - pts_out[0,:]
disp_M = pts_out[2,:] - pts_out[0,:]
return H1, H2, disp_m, disp_M
| agpl-3.0 | -1,560,706,240,523,697,700 | 40.985037 | 102 | 0.61125 | false |
derwentx/WooGenerator | tests/test_parsing_special_v2.py | 1 | 4047 | import os
import time
import unittest
from context import get_testdata, TESTS_DATA_DIR, woogenerator
from woogenerator.parsing.special import CsvParseSpecial
from woogenerator.utils import Registrar, TimeUtils
class TestCSVParseSpecialV2(unittest.TestCase):
def setUp(self):
# import_name = TimeUtils.get_ms_timestamp()
self.spec_path = os.path.join(TESTS_DATA_DIR, "specials_v2.csv")
self.special_parser_args = {
# 'import_name':import_name
}
Registrar.DEBUG_ERROR = False
Registrar.DEBUG_WARN = False
Registrar.DEBUG_MESSAGE = False
# Registrar.DEBUG_MESSAGE = True
# Registrar.DEBUG_SPECIAL = True
# Registrar.DEBUG_PARSER = True
def test_basic(self):
special_parser = CsvParseSpecial(
**self.special_parser_args
)
special_parser.analyse_file(self.spec_path)
if Registrar.DEBUG_PARSER:
Registrar.register_message("number of special groups: %s" \
% len(special_parser.rule_groups))
Registrar.register_message("number of special rules: %s" % len(special_parser.rules))
Registrar.register_message(special_parser.tabulate(tablefmt="simple"))
# check that loner has correct ending
is_singular_child = False
for index, special in special_parser.rule_groups.items():
if len(special.children) == 1:
is_singular_child = True
child = special.children[0]
self.assertEqual(index, child.index)
self.assertTrue(is_singular_child)
def test_has_happened_yet(self):
special_parser = CsvParseSpecial(
**self.special_parser_args
)
special_parser.analyse_file(self.spec_path)
TimeUtils.set_override_time(time.strptime(
"2018-01-01", TimeUtils.wp_date_format))
eofy_special = special_parser.rule_groups.get('EOFY2016')
eofy_start_time = TimeUtils.datetime2utctimestamp(eofy_special.start_time)
self.assertLess(eofy_start_time, TimeUtils.current_tsecs())
self.assertTrue(eofy_special.has_started)
self.assertTrue(eofy_special.has_finished)
self.assertFalse(eofy_special.is_active)
def test_determine_groups(self):
special_parser = CsvParseSpecial(
**self.special_parser_args
)
special_parser.analyse_file(self.spec_path)
# Registrar.DEBUG_SPECIAL = True
# Registrar.DEBUG_MESSAGE = True
override_groups = special_parser.determine_current_spec_grps(
'override',
'EOFY2016'
)
self.assertEquals(
override_groups, [special_parser.rule_groups.get('EOFY2016')])
TimeUtils.set_override_time(time.strptime(
"2018-01-01", TimeUtils.wp_date_format))
auto_next_groups = special_parser.determine_current_spec_grps(
'auto_next'
)
self.assertEquals(auto_next_groups, [])
TimeUtils.set_override_time(time.strptime(
"2016-08-11", TimeUtils.wp_date_format))
auto_next_groups = special_parser.determine_current_spec_grps(
'auto_next'
)
self.assertEquals(
auto_next_groups, [special_parser.rule_groups.get('SP2016-08-12')])
TimeUtils.set_override_time(time.strptime(
"2016-06-11", TimeUtils.wp_date_format))
auto_next_groups = special_parser.determine_current_spec_grps(
'auto_next'
)
self.assertEquals(
auto_next_groups, [special_parser.rule_groups.get('EOFY2016')])
TimeUtils.set_override_time(time.strptime(
"2016-06-13", TimeUtils.wp_date_format))
auto_next_groups = special_parser.determine_current_spec_grps(
'auto_next'
)
self.assertEquals(
auto_next_groups, [special_parser.rule_groups.get('EOFY2016')])
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 6,171,959,762,056,906,000 | 31.902439 | 97 | 0.619471 | false |
qtproject/qt-creator | share/qtcreator/debugger/qttypes.py | 2 | 103408 | ############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import platform
import re
from dumper import *
def qdump__QAtomicInt(d, value):
d.putValue(value.integer())
d.putNumChild(0)
def qdump__QBasicAtomicInt(d, value):
d.putValue(value.integer())
d.putNumChild(0)
def qdump__QAtomicPointer(d, value):
d.putItem(value.cast(value.type[0].pointer()))
d.putBetterType(value.type)
def qform__QByteArray():
return [Latin1StringFormat, SeparateLatin1StringFormat,
Utf8StringFormat, SeparateUtf8StringFormat ]
def qedit__QByteArray(d, value, data):
d.call('void', value, 'resize', str(len(data)))
(base, size, alloc) = d.stringData(value)
d.setValues(base, 'char', [ord(c) for c in data])
def qdump__QByteArray(d, value):
data, size, alloc = d.byteArrayData(value)
d.check(alloc == 0 or (0 <= size and size <= alloc and alloc <= 100000000))
d.putNumChild(size)
elided, p = d.encodeByteArrayHelper(d.extractPointer(value), d.displayStringLimit)
displayFormat = d.currentItemFormat()
if displayFormat == AutomaticFormat or displayFormat == Latin1StringFormat:
d.putValue(p, 'latin1', elided=elided)
elif displayFormat == SeparateLatin1StringFormat:
d.putValue(p, 'latin1', elided=elided)
d.putDisplay('latin1:separate', d.encodeByteArray(value, limit=100000))
elif displayFormat == Utf8StringFormat:
d.putValue(p, 'utf8', elided=elided)
elif displayFormat == SeparateUtf8StringFormat:
d.putValue(p, 'utf8', elided=elided)
d.putDisplay('utf8:separate', d.encodeByteArray(value, limit=100000))
if d.isExpanded():
d.putArrayData(data, size, d.charType())
def qdump__QArrayData(d, value):
data, size, alloc = d.byteArrayDataHelper(value.address())
d.check(alloc == 0 or (0 <= size and size <= alloc and alloc <= 100000000))
d.putValue(d.readMemory(data, size), 'latin1')
d.putNumChild(1)
d.putPlainChildren(value)
def qdump__QByteArrayData(d, value):
qdump__QArrayData(d, value)
def qdump__QBitArray(d, value):
data, basize, alloc = d.byteArrayDataHelper(d.extractPointer(value['d']))
unused = d.extractByte(data)
size = basize * 8 - unused
d.putItemCount(size)
if d.isExpanded():
with Children(d, size, maxNumChild=10000):
for i in d.childRange():
q = data + 1 + int(i / 8)
with SubItem(d, i):
d.putValue((int(d.extractPointer(q)) >> (i % 8)) & 1)
d.putType('bool')
d.putNumChild(0)
def qdump__QChar(d, value):
d.putValue(d.extractUShort(value))
d.putNumChild(0)
def qform_X_QAbstractItemModel():
return [SimpleFormat, EnhancedFormat]
def qdump_X_QAbstractItemModel(d, value):
displayFormat = d.currentItemFormat()
if displayFormat == SimpleFormat:
d.putPlainChildren(value)
return
#displayFormat == EnhancedFormat:
# Create a default-constructed QModelIndex on the stack.
try:
ri = d.pokeValue(d.qtNamespace() + 'QModelIndex', '-1, -1, 0, 0')
this_ = d.makeExpression(value)
ri_ = d.makeExpression(ri)
rowCount = int(d.parseAndEvaluate('%s.rowCount(%s)' % (this_, ri_)))
columnCount = int(d.parseAndEvaluate('%s.columnCount(%s)' % (this_, ri_)))
except:
d.putPlainChildren(value)
return
d.putValue('%d x %d' % (rowCount, columnCount))
d.putNumChild(rowCount * columnCount)
if d.isExpanded():
with Children(d, numChild=rowCount * columnCount, childType=ri.type):
i = 0
for row in xrange(rowCount):
for column in xrange(columnCount):
with SubItem(d, i):
d.putName('[%s, %s]' % (row, column))
mi = d.parseAndEvaluate('%s.index(%d,%d,%s)'
% (this_, row, column, ri_))
d.putItem(mi)
i = i + 1
#gdb.execute('call free($ri)')
def qform_X_QModelIndex():
return [SimpleFormat, EnhancedFormat]
def qdump_X_QModelIndex(d, value):
displayFormat = d.currentItemFormat()
if displayFormat == SimpleFormat:
d.putPlainChildren(value)
return
r = value['r']
c = value['c']
try:
p = value['p']
except:
p = value['i']
m = value['m']
if m.pointer() == 0 or r < 0 or c < 0:
d.putValue('(invalid)')
d.putPlainChildren(value)
return
mm = m.dereference()
mm = mm.cast(mm.type.unqualified())
ns = d.qtNamespace()
try:
mi = d.pokeValue(ns + 'QModelIndex', '%s,%s,%s,%s' % (r, c, p, m))
mm_ = d.makeExpression(mm)
mi_ = d.makeExpression(mi)
rowCount = int(d.parseAndEvaluate('%s.rowCount(%s)' % (mm_, mi_)))
columnCount = int(d.parseAndEvaluate('%s.columnCount(%s)' % (mm_, mi_)))
except:
d.putPlainChildren(value)
return
try:
# Access DisplayRole as value
val = d.parseAndEvaluate('%s.data(%s, 0)' % (mm_, mi_))
v = val['d']['data']['ptr']
d.putStringValue(d.pokeValue(ns + 'QString', v))
except:
d.putValue('')
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putFields(value, False)
i = 0
for row in xrange(rowCount):
for column in xrange(columnCount):
with UnnamedSubItem(d, i):
d.putName('[%s, %s]' % (row, column))
mi2 = d.parseAndEvaluate('%s.index(%d,%d,%s)'
% (mm_, row, column, mi_))
d.putItem(mi2)
i = i + 1
d.putCallItem('parent', '@QModelIndex', value, 'parent')
#gdb.execute('call free($mi)')
def qdump__Qt__ItemDataRole(d, value):
d.putEnumValue(value.integer(), {
0 : "Qt::DisplayRole",
1 : "Qt::DecorationRole",
2 : "Qt::EditRole",
3 : "Qt::ToolTipRole",
4 : "Qt::StatusTipRole",
5 : "Qt::WhatsThisRole",
6 : "Qt::FontRole",
7 : "Qt::TextAlignmentRole",
# obsolete: 8 : "Qt::BackgroundColorRole",
8 : "Qt::BackgroundRole",
# obsolete: 9 : "Qt::TextColorRole",
9 : "Qt::ForegroundRole",
10 : "Qt::CheckStateRole",
11 : "Qt::AccessibleTextRole",
12 : "Qt::AccessibleDescriptionRole",
13 : "Qt::SizeHintRole",
14 : "Qt::InitialSortOrderRole",
# 27-31 Qt4 ItemDataRoles
27 : "Qt::DisplayPropertyRole",
28 : "Qt::DecorationPropertyRole",
29 : "Qt::ToolTipPropertyRole",
30 : "Qt::StatusTipPropertyRole",
31 : "Qt::WhatsThisPropertyRole",
0x100 : "Qt::UserRole"
})
def qdump__QStandardItemData(d, value):
role, pad, val = value.split('{@Qt::ItemDataRole}@{QVariant}')
d.putPairContents(role.value(), (role, val), 'role', 'value')
def qdump__QStandardItem(d, value):
vtable, dptr = value.split('pp')
# There used to be a virtual destructor that got removed in
# 88b6abcebf29b455438 on Apr 18 17:01:22 2017
if d.qtVersion() >= 0x050900 or d.isMsvcTarget():
model, parent, values, children, rows, cols, item = d.split('ppPPIIp', dptr)
else:
vtable1, model, parent, values, children, rows, cols, item = d.split('pppPPIIp', dptr)
d.putValue(' ')
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putSubItem('[model]', d.createValue(model, '@QStandardItemModel'))
d.putSubItem('[values]', d.createVectorItem(values, '@QStandardItemData'))
d.putSubItem('[children]', d.createVectorItem(children,
d.createPointerType(value.type)))
def qdump__QDate(d, value):
jd = value.pointer()
if jd:
d.putValue(jd, 'juliandate')
d.putNumChild(1)
if d.isExpanded():
with Children(d):
if d.canCallLocale():
d.putCallItem('toString', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'TextDate'))
d.putCallItem('(ISO)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'ISODate'))
d.putCallItem('(SystemLocale)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'SystemLocaleDate'))
d.putCallItem('(Locale)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'LocaleDate'))
d.putFields(value)
else:
d.putValue('(invalid)')
d.putNumChild(0)
def qdump__QTime(d, value):
mds = value.split('i')[0]
if mds == -1:
d.putValue('(invalid)')
d.putNumChild(0)
return
d.putValue(mds, 'millisecondssincemidnight')
if d.isExpanded():
with Children(d):
d.putCallItem('toString', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'TextDate'))
d.putCallItem('(ISO)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'ISODate'))
if d.canCallLocale():
d.putCallItem('(SystemLocale)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'SystemLocaleDate'))
d.putCallItem('(Locale)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'LocaleDate'))
d.putFields(value)
def qdump__QTimeZone(d, value):
base = d.extractPointer(value)
if base == 0:
d.putValue('(null)')
d.putNumChild(0)
return
idAddr = base + 2 * d.ptrSize() # [QSharedData] + [vptr]
d.putByteArrayValue(idAddr)
d.putPlainChildren(value['d'])
def qdump__QDateTime(d, value):
qtVersion = d.qtVersion()
isValid = False
# This relies on the Qt4/Qt5 internal structure layout:
# {sharedref(4), ...
base = d.extractPointer(value)
is32bit = d.ptrSize() == 4
if qtVersion >= 0x050200:
tiVersion = d.qtTypeInfoVersion()
#warn('TI VERSION: %s' % tiVersion)
if tiVersion is None:
tiVersion = 4
if tiVersion > 10:
status = d.extractByte(value)
#warn('STATUS: %s' % status)
if status & 0x01:
# Short data
msecs = d.extractUInt64(value) >> 8
spec = (status & 0x30) >> 4
offsetFromUtc = 0
timeZone = 0
isValid = status & 0x08
else:
dptr = d.extractPointer(value)
(msecs, status, offsetFromUtc, ref, timeZone) = d.split('qIIIp', dptr)
spec = (status & 0x30) >> 4
isValid = True
d.putValue('%s/%s/%s/%s/%s/%s' % (msecs, spec, offsetFromUtc, timeZone, status, tiVersion),
'datetimeinternal')
else:
if d.isWindowsTarget():
msecsOffset = 8
specOffset = 16
offsetFromUtcOffset = 20
timeZoneOffset = 24
statusOffset = 28 if is32bit else 32
else:
msecsOffset = 4 if is32bit else 8
specOffset = 12 if is32bit else 16
offsetFromUtcOffset = 16 if is32bit else 20
timeZoneOffset = 20 if is32bit else 24
statusOffset = 24 if is32bit else 32
status = d.extractInt(base + statusOffset)
if int(status & 0x0c == 0x0c): # ValidDate and ValidTime
isValid = True
msecs = d.extractInt64(base + msecsOffset)
spec = d.extractInt(base + specOffset)
offset = d.extractInt(base + offsetFromUtcOffset)
tzp = d.extractPointer(base + timeZoneOffset)
if tzp == 0:
tz = ''
else:
idBase = tzp + 2 * d.ptrSize() # [QSharedData] + [vptr]
elided, tz = d.encodeByteArrayHelper(d.extractPointer(idBase), limit=100)
d.putValue('%s/%s/%s/%s/%s/%s' % (msecs, spec, offset, tz, status, 0),
'datetimeinternal')
else:
# This relies on the Qt4/Qt5 internal structure layout:
# {sharedref(4), date(8), time(4+x)}
# QDateTimePrivate:
# - QAtomicInt ref; (padded on 64 bit)
# - [QDate date;]
# - - uint jd in Qt 4, qint64 in Qt 5.0 and Qt 5.1; padded on 64 bit
# - [QTime time;]
# - - uint mds;
# - Spec spec;
dateSize = 8 if qtVersion >= 0x050000 else 4 # Qt5: qint64, Qt4 uint
# 4 byte padding after 4 byte QAtomicInt if we are on 64 bit and QDate is 64 bit
refPlusPadding = 8 if qtVersion >= 0x050000 and d.ptrSize() == 8 else 4
dateBase = base + refPlusPadding
timeBase = dateBase + dateSize
mds = d.extractInt(timeBase)
isValid = mds > 0
if isValid:
jd = d.extractInt(dateBase)
d.putValue('%s/%s' % (jd, mds), 'juliandateandmillisecondssincemidnight')
if not isValid:
d.putValue('(invalid)')
d.putNumChild(0)
return
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putCallItem('toTime_t', 'unsigned int', value, 'toTime_t')
if d.canCallLocale():
d.putCallItem('toString', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'TextDate'))
d.putCallItem('(ISO)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'ISODate'))
d.putCallItem('toUTC', '@QDateTime', value, 'toTimeSpec',
d.enumExpression('TimeSpec', 'UTC'))
d.putCallItem('(SystemLocale)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'SystemLocaleDate'))
d.putCallItem('(Locale)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'LocaleDate'))
d.putCallItem('toLocalTime', '@QDateTime', value, 'toTimeSpec',
d.enumExpression('TimeSpec', 'LocalTime'))
d.putFields(value)
def qdump__QDir(d, value):
d.putNumChild(1)
privAddress = d.extractPointer(value)
bit32 = d.ptrSize() == 4
qt5 = d.qtVersion() >= 0x050000
# Change 9fc0965 reorders members again.
# bool fileListsInitialized
# QStringList files
# QFileInfoList fileInfos
# QStringList nameFilters
# QDir::SortFlags sort
# QDir::Filters filters
# Before 9fc0965:
# QDirPrivate:
# QAtomicInt ref
# QStringList nameFilters;
# QDir::SortFlags sort;
# QDir::Filters filters;
# // qt3support:
# QChar filterSepChar;
# bool matchAllDirs;
# // end qt3support
# QScopedPointer<QAbstractFileEngine> fileEngine;
# bool fileListsInitialized;
# QStringList files;
# QFileInfoList fileInfos;
# QFileSystemEntry dirEntry;
# QFileSystemEntry absoluteDirEntry;
# QFileSystemEntry:
# QString m_filePath
# QByteArray m_nativeFilePath
# qint16 m_lastSeparator
# qint16 m_firstDotInFileName
# qint16 m_lastDotInFileName
# + 2 byte padding
fileSystemEntrySize = 2 * d.ptrSize() + 8
if d.qtVersion() < 0x050200:
case = 0
elif d.qtVersion() >= 0x050300:
case = 1
else:
# Try to distinguish bool vs QStringList at the first item
# after the (padded) refcount. If it looks like a bool assume
# this is after 9fc0965. This is not safe.
firstValue = d.extractInt(privAddress + d.ptrSize())
case = 1 if firstValue == 0 or firstValue == 1 else 0
if case == 1:
if bit32:
filesOffset = 4
fileInfosOffset = 8
dirEntryOffset = 0x20
absoluteDirEntryOffset = 0x30
else:
filesOffset = 0x08
fileInfosOffset = 0x10
dirEntryOffset = 0x30
absoluteDirEntryOffset = 0x48
else:
# Assume this is before 9fc0965.
qt3support = d.isQt3Support()
qt3SupportAddition = d.ptrSize() if qt3support else 0
filesOffset = (24 if bit32 else 40) + qt3SupportAddition
fileInfosOffset = filesOffset + d.ptrSize()
dirEntryOffset = fileInfosOffset + d.ptrSize()
absoluteDirEntryOffset = dirEntryOffset + fileSystemEntrySize
d.putStringValue(privAddress + dirEntryOffset)
if d.isExpanded():
with Children(d):
if not d.isMsvcTarget():
ns = d.qtNamespace()
d.call('int', value, 'count') # Fill cache.
#d.putCallItem('absolutePath', '@QString', value, 'absolutePath')
#d.putCallItem('canonicalPath', '@QString', value, 'canonicalPath')
with SubItem(d, 'absolutePath'):
typ = d.lookupType(ns + 'QString')
d.putItem(d.createValue(privAddress + absoluteDirEntryOffset, typ))
with SubItem(d, 'entryInfoList'):
typ = d.lookupType(ns + 'QFileInfo')
qdumpHelper_QList(d, privAddress + fileInfosOffset, typ)
with SubItem(d, 'entryList'):
typ = d.lookupType(ns + 'QStringList')
d.putItem(d.createValue(privAddress + filesOffset, typ))
d.putFields(value)
def qdump__QEvent(d, value):
d.putNumChild(1)
if d.isExpanded():
with Children(d):
# Add a sub-item with the event type.
with SubItem(d, '[type]'):
(vtable, privateD, t, flags) = value.split("pp{short}{short}")
event_type_name = d.qtNamespace() + "QEvent::Type"
type_value = t.cast(event_type_name)
d.putValue(type_value.displayEnum('0x%04x', bitsize=16))
d.putType(event_type_name)
d.putNumChild(0)
# Show the rest of the class fields as usual.
d.putFields(value)
def qdump__QKeyEvent(d, value):
# QEvent fields
# virtual table pointer
# QEventPrivate *d;
# ushort t;
# ushort posted : 1;
# ushort spont : 1;
# ushort m_accept : 1;
# ushort reserved : 13;
# QInputEvent fields
# Qt::KeyboardModifiers modState;
# ulong ts;
# QKeyEvent fields
# QString txt;
# int k;
# quint32 nScanCode;
# quint32 nVirtualKey;
# quint32 nModifiers; <- nativeModifiers
# ushort c;
# ushort autor:1;
# ushort reserved:15;
(vtable, privateD, t, flags, modState, ts, txt, k, scanCode,
virtualKey, modifiers,
c, autor) = value.split("ppHHiQ{QString}{int}IIIHH")
#d.putStringValue(txt)
#data = d.encodeString(txt)
key_txt_utf8 = d.encodeStringUtf8(txt)
k_type_name = d.qtNamespace() + "Qt::Key"
k_cast_to_enum_value = k.cast(k_type_name)
k_name = k_cast_to_enum_value.displayEnum(bitsize=32)
matches = re.search(r'Key_(\w+)', k_name)
if matches:
k_name = matches.group(1)
if t == 6:
key_event_type = "Pressed"
elif t == 7:
key_event_type = "Released"
else:
key_event_type = ""
data = ""
if key_event_type:
data += "{} ".format(key_event_type)
# Try to use the name of the enum value, otherwise the value
# of txt in QKeyEvent.
if k_name:
data += "'{}'".format(k_name)
elif key_txt_utf8:
data += "'{}'".format(key_txt_utf8)
else:
data += "<non-ascii>"
k_int = k.integer()
data += " (key:{} vKey:{}".format(k_int, virtualKey)
modifier_list = []
modifier_list.append(("Shift", 0x02000000))
modifier_list.append(("Control", 0x04000000))
modifier_list.append(("Alt", 0x08000000))
modifier_list.append(("Meta", 0x10000000))
    # modifier_list.append(("KeyPad", 0x20000000)) Is this useful?
modifier_list.append(("Grp", 0x40000000))
modifiers = []
for modifier_name, mask in modifier_list:
if modState & mask:
modifiers.append(modifier_name)
if modifiers:
data += " mods:" + "+".join(modifiers)
data += ")"
d.putValue(d.hexencode(data), 'utf8')
d.putNumChild(1)
if d.isExpanded():
with Children(d):
# Add a sub-item with the enum name and value.
with SubItem(d, '[{}]'.format(k_type_name)):
k_cast_to_enum_value = k.cast(k_type_name)
d.putValue(k_cast_to_enum_value.displayEnum('0x%04x', bitsize=32))
d.putType(k_type_name)
d.putNumChild(0)
# Show the rest of the class fields as usual.
d.putFields(value, dumpBase=True)
def qdump__QFile(d, value):
# 9fc0965 and a373ffcd change the layout of the private structure
qtVersion = d.qtVersion()
is32bit = d.ptrSize() == 4
if qtVersion >= 0x050600 and d.qtTypeInfoVersion() >= 17:
# Some QRingBuffer member got removed in 8f92baf5c9
if d.isWindowsTarget():
if d.isMsvcTarget():
offset = 164 if is32bit else 224
else:
offset = 160 if is32bit else 224
else:
offset = 156 if is32bit else 224
elif qtVersion >= 0x050700:
if d.isWindowsTarget():
if d.isMsvcTarget():
offset = 176 if is32bit else 248
else:
offset = 172 if is32bit else 248
else:
offset = 168 if is32bit else 248
elif qtVersion >= 0x050600:
if d.isWindowsTarget():
if d.isMsvcTarget():
offset = 184 if is32bit else 248
else:
offset = 180 if is32bit else 248
else:
offset = 168 if is32bit else 248
elif qtVersion >= 0x050500:
if d.isWindowsTarget():
offset = 164 if is32bit else 248
else:
offset = 164 if is32bit else 248
elif qtVersion >= 0x050400:
if d.isWindowsTarget():
offset = 188 if is32bit else 272
else:
offset = 180 if is32bit else 272
elif qtVersion > 0x050200:
if d.isWindowsTarget():
offset = 180 if is32bit else 272
else:
offset = 176 if is32bit else 272
elif qtVersion >= 0x050000:
offset = 176 if is32bit else 280
else:
if d.isWindowsTarget():
offset = 144 if is32bit else 232
else:
offset = 140 if is32bit else 232
vtable, privAddress = value.split('pp')
fileNameAddress = privAddress + offset
d.putStringValue(fileNameAddress)
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putCallItem('exists', 'bool', value, 'exists')
d.putFields(value)
def qdump__QFileInfo(d, value):
privAddress = d.extractPointer(value)
#bit32 = d.ptrSize() == 4
#qt5 = d.qtVersion() >= 0x050000
#try:
# d.putStringValue(value['d_ptr']['d'].dereference()['fileNames'][3])
#except:
# d.putPlainChildren(value)
# return
filePathAddress = privAddress + d.ptrSize()
d.putStringValue(filePathAddress)
d.putNumChild(1)
if d.isExpanded():
ns = d.qtNamespace()
with Children(d):
stype = '@QString'
d.putCallItem('absolutePath', stype, value, 'absolutePath')
d.putCallItem('absoluteFilePath', stype, value, 'absoluteFilePath')
d.putCallItem('canonicalPath', stype, value, 'canonicalPath')
d.putCallItem('canonicalFilePath', stype, value, 'canonicalFilePath')
d.putCallItem('completeBaseName', stype, value, 'completeBaseName')
d.putCallItem('completeSuffix', stype, value, 'completeSuffix')
d.putCallItem('baseName', stype, value, 'baseName')
if platform.system() == 'Darwin':
d.putCallItem('isBundle', stype, value, 'isBundle')
d.putCallItem('bundleName', stype, value, 'bundleName')
d.putCallItem('fileName', stype, value, 'fileName')
d.putCallItem('filePath', stype, value, 'filePath')
# Crashes gdb (archer-tromey-python, at dad6b53fe)
#d.putCallItem('group', value, 'group')
#d.putCallItem('owner', value, 'owner')
d.putCallItem('path', stype, value, 'path')
d.putCallItem('groupid', 'unsigned int', value, 'groupId')
d.putCallItem('ownerid', 'unsigned int', value, 'ownerId')
#QFile::Permissions permissions () const
try:
perms = d.call('int', value, 'permissions')
except:
perms = None
if perms is None:
with SubItem(d, 'permissions'):
d.putSpecialValue('notcallable')
d.putType(ns + 'QFile::Permissions')
d.putNumChild(0)
else:
with SubItem(d, 'permissions'):
d.putEmptyValue()
d.putType(ns + 'QFile::Permissions')
d.putNumChild(10)
if d.isExpanded():
with Children(d, 10):
perms = perms['i']
d.putBoolItem('ReadOwner', perms & 0x4000)
d.putBoolItem('WriteOwner', perms & 0x2000)
d.putBoolItem('ExeOwner', perms & 0x1000)
d.putBoolItem('ReadUser', perms & 0x0400)
d.putBoolItem('WriteUser', perms & 0x0200)
d.putBoolItem('ExeUser', perms & 0x0100)
d.putBoolItem('ReadGroup', perms & 0x0040)
d.putBoolItem('WriteGroup', perms & 0x0020)
d.putBoolItem('ExeGroup', perms & 0x0010)
d.putBoolItem('ReadOther', perms & 0x0004)
d.putBoolItem('WriteOther', perms & 0x0002)
d.putBoolItem('ExeOther', perms & 0x0001)
#QDir absoluteDir () const
#QDir dir () const
d.putCallItem('caching', 'bool', value, 'caching')
d.putCallItem('exists', 'bool', value, 'exists')
d.putCallItem('isAbsolute', 'bool', value, 'isAbsolute')
d.putCallItem('isDir', 'bool', value, 'isDir')
d.putCallItem('isExecutable', 'bool', value, 'isExecutable')
d.putCallItem('isFile', 'bool', value, 'isFile')
d.putCallItem('isHidden', 'bool', value, 'isHidden')
d.putCallItem('isReadable', 'bool', value, 'isReadable')
d.putCallItem('isRelative', 'bool', value, 'isRelative')
d.putCallItem('isRoot', 'bool', value, 'isRoot')
d.putCallItem('isSymLink', 'bool', value, 'isSymLink')
d.putCallItem('isWritable', 'bool', value, 'isWritable')
d.putCallItem('created', 'bool', value, 'created')
d.putCallItem('lastModified', 'bool', value, 'lastModified')
d.putCallItem('lastRead', 'bool', value, 'lastRead')
d.putFields(value)
def qdump__QFixed(d, value):
v = value.split('i')[0]
d.putValue('%s/64 = %s' % (v, v/64.0))
d.putNumChild(0)
def qform__QFiniteStack():
return arrayForms()
def qdump__QFiniteStack(d, value):
array, alloc, size = value.split('pii')
d.check(0 <= size and size <= alloc and alloc <= 1000 * 1000 * 1000)
d.putItemCount(size)
d.putPlotData(array, size, value.type[0])
def qdump__QFlags(d, value):
i = value.split('{int}')[0]
enumType = value.type[0]
v = i.cast(enumType.name)
d.putValue(v.displayEnum('0x%04x', bitsize=32))
d.putNumChild(0)
def qform__QHash():
return mapForms()
def qdump__QHash(d, value):
qdumpHelper_QHash(d, value, value.type[0], value.type[1])
def qdump__QVariantHash(d, value):
qdumpHelper_QHash(d, value, d.createType('QString'), d.createType('QVariant'))
def qdumpHelper_QHash(d, value, keyType, valueType):
def hashDataFirstNode():
b = buckets
n = numBuckets
while n:
n -= 1
bb = d.extractPointer(b)
if bb != dptr:
return bb
b += ptrSize
return dptr
def hashDataNextNode(node):
(nextp, h) = d.split('pI', node)
if d.extractPointer(nextp):
return nextp
start = (h % numBuckets) + 1
b = buckets + start * ptrSize
n = numBuckets - start
while n:
n -= 1
bb = d.extractPointer(b)
if bb != nextp:
return bb
b += ptrSize
return nextp
ptrSize = d.ptrSize()
dptr = d.extractPointer(value)
(fakeNext, buckets, ref, size, nodeSize, userNumBits, numBits, numBuckets) = \
d.split('ppiiihhi', dptr)
d.check(0 <= size and size <= 100 * 1000 * 1000)
d.check(-1 <= ref and ref < 100000)
d.putItemCount(size)
if d.isExpanded():
isShort = d.qtVersion() < 0x050000 and keyType.name == 'int'
with Children(d, size):
node = hashDataFirstNode()
for i in d.childRange():
if isShort:
typeCode = 'P{%s}@{%s}' % (keyType.name, valueType.name)
(pnext, key, padding2, val) = d.split(typeCode, node)
else:
typeCode = 'Pi@{%s}@{%s}' % (keyType.name, valueType.name)
(pnext, hashval, padding1, key, padding2, val) = d.split(typeCode, node)
d.putPairItem(i, (key, val), 'key', 'value')
node = hashDataNextNode(node)
def qform__QHashNode():
return mapForms()
def qdump__QHashNode(d, value):
d.putPairItem(None, value)
def qHashIteratorHelper(d, value):
typeName = value.type.name
hashTypeName = typeName[0:typeName.rfind('::')]
hashType = d.lookupType(hashTypeName)
keyType = hashType[0]
valueType = hashType[1]
d.putNumChild(1)
d.putEmptyValue()
if d.isExpanded():
with Children(d):
node = d.extractPointer(value)
isShort = d.qtVersion() < 0x050000 and keyType.name == 'int'
if isShort:
typeCode = 'P{%s}@{%s}' % (keyType.name, valueType.name)
(pnext, key, padding2, val) = d.split(typeCode, node)
else:
typeCode = 'Pi@{%s}@{%s}' % (keyType.name, valueType.name)
(pnext, hashval, padding1, key, padding2, val) = d.split(typeCode, node)
d.putSubItem('key', key)
d.putSubItem('value', val)
def qdump__QHash__const_iterator(d, value):
qHashIteratorHelper(d, value)
def qdump__QHash__iterator(d, value):
qHashIteratorHelper(d, value)
def qdump__QHostAddress(d, value):
dd = d.extractPointer(value)
qtVersion = d.qtVersion()
tiVersion = d.qtTypeInfoVersion()
#warn('QT: %x, TI: %s' % (qtVersion, tiVersion))
mayNeedParse = True
if tiVersion is not None:
if tiVersion >= 16:
# After a6cdfacf
p, scopeId, a6, a4, protocol = d.split('p{QString}16s{quint32}B', dd)
mayNeedParse = False
elif tiVersion >= 5:
# Branch 5.8.0 at f70b4a13 TI: 15
# Branch 5.7.0 at b6cf0418 TI: 5
(ipString, scopeId, a6, a4, protocol, isParsed) \
= d.split('{QString}{QString}16s{quint32}B{bool}', dd)
else:
(ipString, scopeId, a4, pad, a6, protocol, isParsed) \
= d.split('{QString}{QString}{quint32}I16sI{bool}', dd)
elif qtVersion >= 0x050600: # 5.6.0 at f3aabb42
if d.ptrSize() == 8 or d.isWindowsTarget():
(ipString, scopeId, a4, pad, a6, protocol, isParsed) \
= d.split('{QString}{QString}{quint32}I16sI{bool}', dd)
else:
(ipString, scopeId, a4, a6, protocol, isParsed) \
= d.split('{QString}{QString}{quint32}16sI{bool}', dd)
elif qtVersion >= 0x050000: # 5.2.0 at 62feb088
(ipString, scopeId, a4, a6, protocol, isParsed) \
= d.split('{QString}{QString}{quint32}16sI{bool}', dd)
else: # 4.8.7 at b05d05f
(a4, a6, protocol, pad, ipString, isParsed, pad, scopeId) \
= d.split('{quint32}16sB@{QString}{bool}@{QString}', dd)
if mayNeedParse:
ipStringData, ipStringSize, ipStringAlloc = d.stringData(ipString)
if mayNeedParse and isParsed.integer() and ipStringSize > 0:
d.putStringValue(ipString)
else:
# value.d.d->protocol:
# QAbstractSocket::IPv4Protocol = 0
# QAbstractSocket::IPv6Protocol = 1
if protocol == 1:
# value.d.d->a6
data = d.hexencode(a6)
address = ':'.join('%x' % int(data[i:i+4], 16) for i in xrange(0, 32, 4))
d.putValue(address)
elif protocol == 0:
# value.d.d->a
a = a4.integer()
a, n4 = divmod(a, 256)
a, n3 = divmod(a, 256)
a, n2 = divmod(a, 256)
a, n1 = divmod(a, 256)
d.putValue('%d.%d.%d.%d' % (n1, n2, n3, n4));
else:
d.putValue('<unspecified protocol %s>' % protocol)
d.putNumChild(4)
if d.isExpanded():
with Children(d):
if mayNeedParse:
d.putSubItem('ipString', ipString)
d.putSubItem('isParsed', isParsed)
d.putSubItem('scopeId', scopeId)
d.putSubItem('a', a4)
def qdump__QIPv6Address(d, value):
raw = d.split('16s', value)[0]
data = d.hexencode(raw)
d.putValue(':'.join('%x' % int(data[i:i+4], 16) for i in xrange(0, 32, 4)))
d.putArrayData(value.address(), 16, d.lookupType('unsigned char'))
def qform__QList():
return [DirectQListStorageFormat, IndirectQListStorageFormat]
def qdump__QList(d, value):
return qdumpHelper_QList(d, value, value.type[0])
def qdump__QVariantList(d, value):
qdumpHelper_QList(d, value, d.createType('QVariant'))
def qdumpHelper_QList(d, value, innerType):
base = d.extractPointer(value)
(ref, alloc, begin, end) = d.split('IIII', base)
array = base + 16
if d.qtVersion() < 0x50000:
array += d.ptrSize()
d.check(begin >= 0 and end >= 0 and end <= 1000 * 1000 * 1000)
size = end - begin
d.check(size >= 0)
#d.checkRef(private['ref'])
d.putItemCount(size)
if d.isExpanded():
innerSize = innerType.size()
stepSize = d.ptrSize()
addr = array + begin * stepSize
# The exact condition here is:
# QTypeInfo<T>::isLarge || QTypeInfo<T>::isStatic
# but this data is available neither in the compiled binary nor
# in the frontend.
# So as first approximation only do the 'isLarge' check:
displayFormat = d.currentItemFormat()
if displayFormat == DirectQListStorageFormat:
isInternal = True
elif displayFormat == IndirectQListStorageFormat:
isInternal = False
else:
isInternal = innerSize <= stepSize and innerType.isMovableType()
if isInternal:
if innerSize == stepSize:
d.putArrayData(addr, size, innerType)
else:
with Children(d, size, childType=innerType):
for i in d.childRange():
p = d.createValue(addr + i * stepSize, innerType)
d.putSubItem(i, p)
else:
# about 0.5s / 1000 items
with Children(d, size, maxNumChild=2000, childType=innerType):
for i in d.childRange():
p = d.extractPointer(addr + i * stepSize)
x = d.createValue(p, innerType)
d.putSubItem(i, x)
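# Note (illustrative, not part of the dumper logic): QList<T> keeps elements
# directly in its pointer-sized array slots when T is small and movable (e.g.
# QList<int>), and stores heap pointers to the elements otherwise (e.g. a large
# struct). The 'isInternal' heuristic above approximates that rule; the
# Direct/Indirect display formats let the user override it when the guess is
# wrong for a particular type.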
def qform__QImage():
return [SimpleFormat, SeparateFormat]
def qdump__QImage(d, value):
if d.qtVersion() < 0x050000:
(vtbl, painters, imageData) = value.split('ppp');
else:
(vtbl, painters, reserved, imageData) = value.split('pppp');
if imageData == 0:
d.putValue('(invalid)')
return
(ref, width, height, depth, nbytes, padding, devicePixelRatio, colorTable,
bits, iformat) = d.split('iiiii@dppi', imageData)
d.putValue('(%dx%d)' % (width, height))
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putIntItem('width', width)
d.putIntItem('height', height)
d.putIntItem('nbytes', nbytes)
d.putIntItem('format', iformat)
with SubItem(d, 'data'):
d.putValue('0x%x' % bits)
d.putNumChild(0)
d.putType('void *')
displayFormat = d.currentItemFormat()
if displayFormat == SeparateFormat:
d.putDisplay('imagedata:separate', '%08x%08x%08x%08x' % (width, height, nbytes, iformat)
+ d.readMemory(bits, nbytes))
def qdump__QLinkedList(d, value):
dd = d.extractPointer(value)
ptrSize = d.ptrSize()
n = d.extractInt(dd + 4 + 2 * ptrSize);
ref = d.extractInt(dd + 2 * ptrSize);
d.check(0 <= n and n <= 100*1000*1000)
d.check(-1 <= ref and ref <= 1000)
d.putItemCount(n)
if d.isExpanded():
innerType = value.type[0]
with Children(d, n, maxNumChild=1000, childType=innerType):
pp = d.extractPointer(dd)
for i in d.childRange():
d.putSubItem(i, d.createValue(pp + 2 * ptrSize, innerType))
pp = d.extractPointer(pp)
qqLocalesCount = None
def qdump__QLocale(d, value):
if d.isMsvcTarget(): # as long as this dumper relies on calling functions skip it for cdb
return
# Check for uninitialized 'index' variable. Retrieve size of
# QLocale data array from variable in qlocale.cpp.
# Default is 368 in Qt 4.8, 438 in Qt 5.0.1, the last one
# being 'System'.
#global qqLocalesCount
#if qqLocalesCount is None:
# #try:
# qqLocalesCount = int(value(ns + 'locale_data_size'))
# #except:
# qqLocalesCount = 438
#try:
# index = int(value['p']['index'])
#except:
# try:
# index = int(value['d']['d']['m_index'])
# except:
# index = int(value['d']['d']['m_data']...)
#d.check(index >= 0)
#d.check(index <= qqLocalesCount)
if d.qtVersion() < 0x50000:
d.putStringValue(d.call('const char *', value, 'name'))
d.putPlainChildren(value)
return
ns = d.qtNamespace()
dd = value.extractPointer()
(data, ref, numberOptions) = d.split('pi4s', dd)
(languageId, scriptId, countryId,
decimal, group, listt, percent, zero,
minus, plus, exponential) \
= d.split('2s{short}2s'
+ '{QChar}{QChar}{short}{QChar}{QChar}'
+ '{QChar}{QChar}{QChar}', data)
d.putStringValue(d.call('const char *', value, 'name'))
d.putNumChild(1)
if d.isExpanded():
with Children(d):
prefix = ns + 'QLocale::'
d.putSubItem('country', d.createValue(countryId, prefix + 'Country'))
d.putSubItem('language', d.createValue(languageId, prefix + 'Language'))
d.putSubItem('numberOptions', d.createValue(numberOptions, prefix + 'NumberOptions'))
d.putSubItem('decimalPoint', decimal)
d.putSubItem('exponential', exponential)
d.putSubItem('percent', percent)
d.putSubItem('zeroDigit', zero)
d.putSubItem('groupSeparator', group)
d.putSubItem('negativeSign', minus)
d.putSubItem('positiveSign', plus)
d.putCallItem('measurementSystem', '@QLocale::MeasurementSystem',
value, 'measurementSystem')
d.putCallItem('timeFormat_(short)', '@QString',
value, 'timeFormat', ns + 'QLocale::ShortFormat')
d.putCallItem('timeFormat_(long)', '@QString',
value, 'timeFormat', ns + 'QLocale::LongFormat')
d.putFields(value)
def qdump__QMapNode(d, value):
d.putEmptyValue()
d.putNumChild(2)
if d.isExpanded():
with Children(d):
d.putSubItem('key', value['key'])
d.putSubItem('value', value['value'])
def qdumpHelper_Qt4_QMap(d, value, keyType, valueType):
dd = value.extractPointer()
(dummy, it, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
ref, toplevel, n) = d.split('p' * 13 + 'iii', dd)
d.check(0 <= n and n <= 100*1000*1000)
d.checkRef(ref)
d.putItemCount(n)
if d.isExpanded():
if n > 10000:
n = 10000
typeCode = '{%s}@{%s}' % (keyType.name, valueType.name)
pp, payloadSize, fields = d.describeStruct(typeCode)
with Children(d, n):
for i in d.childRange():
key, pad, value = d.split(typeCode, it - payloadSize)
d.putPairItem(i, (key, value), 'key', 'value')
dummy, it = d.split('Pp', it)
def qdumpHelper_Qt5_QMap(d, value, keyType, valueType):
dptr = d.extractPointer(value)
(ref, n) = d.split('ii', dptr)
d.check(0 <= n and n <= 100*1000*1000)
d.check(-1 <= ref and ref < 100000)
d.putItemCount(n)
if d.isExpanded():
if n > 10000:
n = 10000
typeCode = 'ppp@{%s}@{%s}' % (keyType.name, valueType.name)
def helper(node):
(p, left, right, padding1, key, padding2, value) = d.split(typeCode, node)
if left:
for res in helper(left):
yield res
yield (key, value)
if right:
for res in helper(right):
yield res
with Children(d, n):
for (pair, i) in zip(helper(dptr + 8), range(n)):
d.putPairItem(i, pair, 'key', 'value')
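# Note (illustrative, not part of the dumper logic): helper() above is an
# in-order traversal (left subtree, node, right subtree) of the Qt 5 QMap tree,
# so pairs come out in key order; dptr + 8 is the tree header (after the 4-byte
# ref count and 4-byte size), and zip(..., range(n)) stops after the n real
# entries.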
def qform__QMap():
return mapForms()
def qdump__QMap(d, value):
qdumpHelper_QMap(d, value, value.type[0], value.type[1])
def qdumpHelper_QMap(d, value, keyType, valueType):
if d.qtVersion() < 0x50000:
qdumpHelper_Qt4_QMap(d, value, keyType, valueType)
else:
qdumpHelper_Qt5_QMap(d, value, keyType, valueType)
def qform__QMultiMap():
return mapForms()
def qdump__QMultiMap(d, value):
qdump__QMap(d, value)
def qform__QVariantMap():
return mapForms()
def qdump__QVariantMap(d, value):
qdumpHelper_QMap(d, value, d.createType('QString'), d.createType('QVariant'))
def qdump__QMetaMethod(d, value):
d.putQMetaStuff(value, 'QMetaMethod')
def qdump__QMetaEnum(d, value):
d.putQMetaStuff(value, 'QMetaEnum')
def qdump__QMetaProperty(d, value):
d.putQMetaStuff(value, 'QMetaProperty')
def qdump__QMetaClassInfo(d, value):
d.putQMetaStuff(value, 'QMetaClassInfo')
def qdump__QMetaObject(d, value):
d.putEmptyValue()
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putQObjectGutsHelper(0, 0, -1, value.address(), 'QMetaObject')
d.putMembersItem(value)
if False:
def qdump__QObjectPrivate__ConnectionList(d, value):
d.putNumChild(1)
if d.isExpanded():
i = 0
with Children(d):
first, last = value.split('pp')
currentConnection = first
connectionType = d.createType('@QObjectPrivate::Connection')
while currentConnection and currentConnection != last:
sender, receiver, slotObj, nextConnectionList, nextp, prev = \
d.split('pppppp', currentConnection)
d.putSubItem(i, d.createValue(currentConnection, connectionType))
currentConnection = nextp
i += 1
d.putFields(value)
d.putItemCount(i)
else:
d.putSpecialValue('minimumitemcount', 0)
def qdump__QProcEnvKey(d, value):
d.putByteArrayValue(value)
d.putPlainChildren(value)
def qdump__QPixmap(d, value):
if d.qtVersion() < 0x050000:
(vtbl, painters, dataPtr) = value.split('ppp');
else:
(vtbl, painters, reserved, dataPtr) = s = d.split('pppp', value);
if dataPtr == 0:
d.putValue('(invalid)')
else:
(dummy, width, height) = d.split('pii', dataPtr)
d.putValue('(%dx%d)' % (width, height))
d.putPlainChildren(value)
def qdump__QMargins(d, value):
d.putValue('left:%s, top:%s, right:%s, bottom:%s' % (value.split('iiii')))
d.putPlainChildren(value)
def qdump__QPoint(d, value):
d.putValue('(%s, %s)' % (value.split('ii')))
d.putPlainChildren(value)
def qdump__QPointF(d, value):
d.putValue('(%s, %s)' % (value.split('dd')))
d.putPlainChildren(value)
def qdump__QRect(d, value):
pp = lambda l: ('+' if l >= 0 else '') + str(l)
(x1, y1, x2, y2) = d.split('iiii', value)
d.putValue('%sx%s%s%s' % (x2 - x1 + 1, y2 - y1 + 1, pp(x1), pp(y1)))
d.putPlainChildren(value)
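# Note (illustrative, not part of the dumper logic): the QRect value uses X11
# geometry notation, e.g. a QRect(10, 20, 100, 50) is shown as '100x50+10+20'
# (width x height + x + y); qdump__QRectF below does the same with doubles.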
def qdump__QRectF(d, value):
pp = lambda l: ('+' if l >= 0 else '') + str(l)
(x, y, w, h) = value.split('dddd')
d.putValue('%sx%s%s%s' % (w, h, pp(x), pp(y)))
d.putPlainChildren(value)
def qdump__QRegExp(d, value):
# value.priv.engineKey.pattern
privAddress = d.extractPointer(value)
(eng, pattern) = d.split('p{QString}', privAddress)
d.putStringValue(pattern)
d.putNumChild(1)
if d.isExpanded():
with Children(d):
try:
d.call('void', value, 'capturedTexts') # Warm up internal cache.
except:
# Might fail (LLDB, Core files, ...), still cache might be warm.
pass
(patternSyntax, caseSensitive, minimal, pad, t, captures) \
= d.split('{int}{int}B@{QString}{QStringList}', privAddress + 2 * d.ptrSize())
d.putSubItem('syntax', patternSyntax.cast(d.qtNamespace() + 'QRegExp::PatternSyntax'))
d.putSubItem('captures', captures)
def qdump__QRegion(d, value):
regionDataPtr = d.extractPointer(value)
if regionDataPtr == 0:
d.putSpecialValue('empty')
d.putNumChild(0)
else:
if d.qtVersion() >= 0x050400: # Padding removed in ee324e4ed
(ref, pad, rgn) = d.split('i@p', regionDataPtr)
(numRects, innerArea, rects, extents, innerRect) = \
d.split('iiP{QRect}{QRect}', rgn)
elif d.qtVersion() >= 0x050000:
(ref, pad, rgn) = d.split('i@p', regionDataPtr)
(numRects, pad, rects, extents, innerRect, innerArea) = \
d.split('i@P{QRect}{QRect}i', rgn)
else:
if d.isWindowsTarget():
(ref, pad, rgn) = d.split('i@p', regionDataPtr)
else:
(ref, pad, xrgn, xrectangles, rgn) = d.split('i@ppp', regionDataPtr)
if rgn == 0:
numRects = 0
else:
(numRects, pad, rects, extents, innerRect, innerArea) = \
d.split('i@P{QRect}{QRect}i', rgn)
d.putItemCount(numRects)
if d.isExpanded():
with Children(d):
d.putIntItem('numRects', numRects)
d.putIntItem('innerArea', innerArea)
d.putSubItem('extents', extents)
d.putSubItem('innerRect', innerRect)
d.putSubItem('rects', d.createVectorItem(rects, d.qtNamespace() + 'QRect'))
def qdump__QScopedPointer(d, value):
if value.pointer() == 0:
d.putValue('(null)')
d.putNumChild(0)
else:
d.putItem(value['d'])
d.putValue(d.currentValue.value, d.currentValue.encoding)
typeName = value.type.name
if value.type[1].name == d.qtNamespace() + 'QScopedPointerDeleter<%s>' % value.type[0].name:
typeName = d.qtNamespace() + 'QScopedPointer<%s>' % value.type[0].name
d.putBetterType(typeName)
def qdump__QSet(d, value):
def hashDataFirstNode():
b = buckets
n = numBuckets
while n:
n -= 1
bb = d.extractPointer(b)
if bb != dptr:
return bb
b += ptrSize
return dptr
def hashDataNextNode(node):
(nextp, h) = d.split('pI', node)
if d.extractPointer(nextp):
return nextp
start = (h % numBuckets) + 1
b = buckets + start * ptrSize
n = numBuckets - start
while n:
n -= 1
bb = d.extractPointer(b)
if bb != nextp:
return bb
b += ptrSize
return nextp
ptrSize = d.ptrSize()
dptr = d.extractPointer(value)
(fakeNext, buckets, ref, size, nodeSize, userNumBits, numBits, numBuckets) = \
d.split('ppiiihhi', dptr)
d.check(0 <= size and size <= 100 * 1000 * 1000)
d.check(-1 <= ref and ref < 100000)
d.putItemCount(size)
if d.isExpanded():
keyType = value.type[0]
isShort = d.qtVersion() < 0x050000 and keyType.name == 'int'
with Children(d, size, childType=keyType):
node = hashDataFirstNode()
for i in d.childRange():
if isShort:
typeCode = 'P{%s}' % keyType.name
(pnext, key) = d.split(typeCode, node)
else:
typeCode = 'Pi@{%s}' % keyType.name
(pnext, hashval, padding1, key) = d.split(typeCode, node)
with SubItem(d, i):
d.putItem(key)
node = hashDataNextNode(node)
def qdump__QSharedData(d, value):
d.putValue('ref: %s' % value.to('i'))
d.putNumChild(0)
def qdump__QSharedDataPointer(d, value):
d_ptr = value['d']
if d_ptr.pointer() == 0:
d.putValue('(null)')
d.putNumChild(0)
else:
# This replaces the pointer by the pointee, making the
# pointer transparent.
try:
innerType = value.type[0]
except:
d.putValue(d_ptr)
d.putPlainChildren(value)
return
d.putBetterType(d.currentType)
d.putItem(d_ptr.dereference())
def qdump__QSize(d, value):
d.putValue('(%s, %s)' % value.split('ii'))
d.putPlainChildren(value)
def qdump__QSizeF(d, value):
d.putValue('(%s, %s)' % value.split('dd'))
d.putPlainChildren(value)
def qdump__QSizePolicy__Policy(d, value):
d.putEnumValue(value.integer(), {
0 : 'QSizePolicy::Fixed',
1 : 'QSizePolicy::GrowFlag',
2 : 'QSizePolicy::ExpandFlag',
3 : 'QSizePolicy::MinimumExpanding (GrowFlag|ExpandFlag)',
4 : 'QSizePolicy::ShrinkFlag',
5 : 'QSizePolicy::Preferred (GrowFlag|ShrinkFlag)',
7 : 'QSizePolicy::Expanding (GrowFlag|ShrinkFlag|ExpandFlag)',
8 : 'QSizePolicy::IgnoreFlag',
13 : 'QSizePolicy::Ignored (ShrinkFlag|GrowFlag|IgnoreFlag)',
})
def qdump__QSizePolicy(d, value):
bits = value.integer()
d.putEmptyValue(-99)
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putIntItem('horStretch', (bits >> 0) & 0xff)
d.putIntItem('verStretch', (bits >> 8) & 0xff)
d.putEnumItem('horPolicy', (bits >> 16) & 0xf, "@QSizePolicy::Policy")
d.putEnumItem('verPolicy', (bits >> 20) & 0xf, "@QSizePolicy::Policy")
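# Note (illustrative, not part of the dumper logic): QSizePolicy packs its data
# into one 32-bit word, decoded above as bits 0-7 horizontal stretch, bits 8-15
# vertical stretch, bits 16-19 horizontal policy and bits 20-23 vertical policy.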
def qform__QStack():
return arrayForms()
def qdump__QStack(d, value):
qdump__QVector(d, value)
def qdump__QPolygonF(d, value):
data, size, alloc = d.vectorDataHelper(d.extractPointer(value))
d.putItemCount(size)
d.putPlotData(data, size, d.createType('QPointF'))
def qdump__QPolygon(d, value):
data, size, alloc = d.vectorDataHelper(d.extractPointer(value))
d.putItemCount(size)
d.putPlotData(data, size, d.createType('QPoint'))
def qdump__QGraphicsPolygonItem(d, value):
(vtbl, dptr) = value.split('pp')
# Assume sizeof(QGraphicsPolygonItemPrivate) == 400
if d.ptrSize() == 8:
offset = 384
elif d.isWindowsTarget():
offset = 328 if d.isMsvcTarget() else 320
else:
offset = 308
data, size, alloc = d.vectorDataHelper(d.extractPointer(dptr + offset))
d.putItemCount(size)
d.putPlotData(data, size, d.createType('QPointF'))
def qedit__QString(d, value, data):
d.call('void', value, 'resize', str(len(data)))
(base, size, alloc) = d.stringData(value)
d.setValues(base, 'short', [ord(c) for c in data])
def qform__QString():
return [SimpleFormat, SeparateFormat]
def qdump__QString(d, value):
d.putStringValue(value)
(data, size, alloc) = d.stringData(value)
d.putNumChild(size)
displayFormat = d.currentItemFormat()
if displayFormat == SeparateFormat:
d.putDisplay('utf16:separate', d.encodeString(value, limit=100000))
if d.isExpanded():
d.putArrayData(data, size, d.createType('QChar'))
def qdump__QStaticStringData(d, value):
size = value.type[0]
(ref, size, alloc, pad, offset, data) = value.split('iii@p%ss' % (2 * size))
d.putValue(d.hexencode(data), 'utf16')
d.putPlainChildren(value)
def qdump__QTypedArrayData(d, value):
if value.type[0].name == 'unsigned short':
qdump__QStringData(d, value)
else:
qdump__QArrayData(d, value)
def qdump__QStringData(d, value):
(ref, size, alloc, pad, offset) = value.split('III@p')
elided, shown = d.computeLimit(size, d.displayStringLimit)
data = d.readMemory(value.address() + offset, shown * 2)
d.putValue(data, 'utf16', elided=elided)
d.putNumChild(1)
d.putPlainChildren(value)
def qdump__QHashedString(d, value):
qdump__QString(d, value)
d.putBetterType(value.type)
def qdump__QQmlRefCount(d, value):
d.putItem(value['refCount'])
d.putBetterType(value.type)
def qdump__QStringRef(d, value):
(stringptr, pos, size) = value.split('pii')
if stringptr == 0:
d.putValue('(null)');
d.putNumChild(0)
return
(data, ssize, alloc) = d.stringData(d.createValue(stringptr, 'QString'))
d.putValue(d.readMemory(data + 2 * pos, 2 * size), 'utf16')
d.putPlainChildren(value)
def qdump__QStringList(d, value):
qdumpHelper_QList(d, value, d.createType('QString'))
d.putBetterType(value.type)
def qdump__QTemporaryFile(d, value):
qdump__QFile(d, value)
def qdump__QTextCodec(d, value):
name = d.call('const char *', value, 'name')
d.putValue(d.encodeByteArray(name, limit=100), 6)
d.putNumChild(2)
if d.isExpanded():
with Children(d):
d.putCallItem('name', '@QByteArray', value, 'name')
d.putCallItem('mibEnum', 'int', value, 'mibEnum')
d.putFields(value)
def qdump__QTextCursor(d, value):
privAddress = d.extractPointer(value)
if privAddress == 0:
d.putValue('(invalid)')
d.putNumChild(0)
else:
positionAddress = privAddress + 2 * d.ptrSize() + 8
d.putValue(d.extractInt(positionAddress))
d.putNumChild(1)
if d.isExpanded():
with Children(d):
positionAddress = privAddress + 2 * d.ptrSize() + 8
d.putIntItem('position', d.extractInt(positionAddress))
d.putIntItem('anchor', d.extractInt(positionAddress + 4))
d.putCallItem('selected', '@QString', value, 'selectedText')
d.putFields(value)
def qdump__QTextDocument(d, value):
d.putEmptyValue()
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putCallItem('blockCount', 'int', value, 'blockCount')
d.putCallItem('characterCount', 'int', value, 'characterCount')
d.putCallItem('lineCount', 'int', value, 'lineCount')
d.putCallItem('revision', 'int', value, 'revision')
d.putCallItem('toPlainText', '@QString', value, 'toPlainText')
d.putFields(value)
def qform__QUrl():
return [SimpleFormat, SeparateFormat]
def qdump__QUrl(d, value):
privAddress = d.extractPointer(value)
if not privAddress:
# d == 0 if QUrl was constructed with default constructor
d.putValue('<invalid>')
d.putNumChild(0)
return
if d.qtVersion() < 0x050000:
d.call('void', value, 'port') # Warm up internal cache.
d.call('void', value, 'path')
st = '{QString}'
ba = '{QByteArray}'
(ref, dummy,
scheme, userName, password, host, path, # QString
query, # QByteArray
fragment, # QString
encodedOriginal, encodedUserName, encodedPassword,
encodedPath, encodedFragment, # QByteArray
port) \
= d.split('i@' + st*5 + ba + st + ba*5 + 'i', privAddress)
else:
(ref, port, scheme, userName, password, host, path, query, fragment) \
= d.split('ii' + '{QString}' * 7, privAddress)
userNameEnc = d.encodeString(userName)
hostEnc = d.encodeString(host)
pathEnc = d.encodeString(path)
url = d.encodeString(scheme)
url += '3a002f002f00' # '://'
if len(userNameEnc):
url += userNameEnc + '4000' # '@'
url += hostEnc
if port >= 0:
url += '3a00' + ''.join(['%02x00' % ord(c) for c in str(port)])
url += pathEnc
d.putValue(url, 'utf16')
displayFormat = d.currentItemFormat()
if displayFormat == SeparateFormat:
d.putDisplay('utf16:separate', url)
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putIntItem('port', port)
d.putSubItem('scheme', scheme)
d.putSubItem('userName', userName)
d.putSubItem('password', password)
d.putSubItem('host', host)
d.putSubItem('path', path)
d.putSubItem('query', query)
d.putSubItem('fragment', fragment)
d.putFields(value)
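# Note (illustrative, not part of the dumper logic): the url above is assembled
# as hex-encoded UTF-16LE, which is what putValue(..., 'utf16') expects:
# '3a002f002f00' is '://', '4000' is '@' and '3a00' is ':'. A scheme 'http',
# host 'example.com' and port 8080 would thus display as
# 'http://example.com:8080' followed by the path.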
def qdump__QUuid(d, value):
r = value.split('IHHBBBBBBBB')
d.putValue('{%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x}' % r)
d.putNumChild(1)
d.putPlainChildren(value)
def qdumpHelper_QVariant_0(d, value):
# QVariant::Invalid
d.putBetterType('%sQVariant (invalid)' % d.qtNamespace())
d.putValue('(invalid)')
def qdumpHelper_QVariant_1(d, value):
# QVariant::Bool
d.putBetterType('%sQVariant (bool)' % d.qtNamespace())
d.putValue('true' if value.to('b') else 'false')
def qdumpHelper_QVariant_2(d, value):
# QVariant::Int
d.putBetterType('%sQVariant (int)' % d.qtNamespace())
d.putValue(value.to('i'))
def qdumpHelper_QVariant_3(d, value):
# uint
d.putBetterType('%sQVariant (uint)' % d.qtNamespace())
d.putValue(value.to('I'))
def qdumpHelper_QVariant_4(d, value):
# qlonglong
d.putBetterType('%sQVariant (qlonglong)' % d.qtNamespace())
d.putValue(value.to('q'))
def qdumpHelper_QVariant_5(d, value):
# qulonglong
d.putBetterType('%sQVariant (qulonglong)' % d.qtNamespace())
d.putValue(value.to('Q'))
def qdumpHelper_QVariant_6(d, value):
# QVariant::Double
d.putBetterType('%sQVariant (double)' % d.qtNamespace())
d.putValue(value.to('d'))
qdumpHelper_QVariants_A = [
qdumpHelper_QVariant_0,
qdumpHelper_QVariant_1,
qdumpHelper_QVariant_2,
qdumpHelper_QVariant_3,
qdumpHelper_QVariant_4,
qdumpHelper_QVariant_5,
qdumpHelper_QVariant_6
]
qdumpHelper_QVariants_B = [
'QChar', # 7
'QVariantMap', # 8
'QVariantList',# 9
'QString', # 10
'QStringList', # 11
'QByteArray', # 12
'QBitArray', # 13
'QDate', # 14
'QTime', # 15
'QDateTime', # 16
'QUrl', # 17
'QLocale', # 18
'QRect', # 19
'QRectF', # 20
'QSize', # 21
'QSizeF', # 22
'QLine', # 23
'QLineF', # 24
'QPoint', # 25
'QPointF', # 26
'QRegExp', # 27
'QVariantHash',# 28
]
def qdumpHelper_QVariant_31(d, value):
# QVariant::VoidStar
d.putBetterType('%sQVariant (void *)' % d.qtNamespace())
d.putValue('0x%x' % d.extractPointer(value))
def qdumpHelper_QVariant_32(d, value):
# QVariant::Long
d.putBetterType('%sQVariant (long)' % d.qtNamespace())
if d.ptrSize() == 4:
d.putValue('%s' % d.extractInt(value))
else:
d.putValue('%s' % d.extractInt64(value)) # sic!
def qdumpHelper_QVariant_33(d, value):
# QVariant::Short
d.putBetterType('%sQVariant (short)' % d.qtNamespace())
d.putValue('%s' % d.extractShort(value))
def qdumpHelper_QVariant_34(d, value):
# QVariant::Char
d.putBetterType('%sQVariant (char)' % d.qtNamespace())
d.putValue('%s' % d.extractByte(value))
def qdumpHelper_QVariant_35(d, value):
# QVariant::ULong
d.putBetterType('%sQVariant (unsigned long)' % d.qtNamespace())
if d.ptrSize() == 4:
d.putValue('%s' % d.extractUInt(value))
else:
d.putValue('%s' % d.extractUInt64(value)) # sic!
def qdumpHelper_QVariant_36(d, value):
# QVariant::UShort
d.putBetterType('%sQVariant (unsigned short)' % d.qtNamespace())
d.putValue('%s' % d.extractUShort(value))
def qdumpHelper_QVariant_37(d, value):
# QVariant::UChar
d.putBetterType('%sQVariant (unsigned char)' % d.qtNamespace())
d.putValue('%s' % d.extractByte(value))
def qdumpHelper_QVariant_38(d, value):
# QVariant::Float
d.putBetterType('%sQVariant (float)' % d.qtNamespace())
d.putValue(value.to('f'))
qdumpHelper_QVariants_D = [
qdumpHelper_QVariant_31,
qdumpHelper_QVariant_32,
qdumpHelper_QVariant_33,
qdumpHelper_QVariant_34,
qdumpHelper_QVariant_35,
qdumpHelper_QVariant_36,
qdumpHelper_QVariant_37,
qdumpHelper_QVariant_38
]
qdumpHelper_QVariants_E = [
'QFont', # 64
'QPixmap', # 65
'QBrush', # 66
'QColor', # 67
'QPalette', # 68
'QIcon', # 69
'QImage', # 70
'QPolygon', # 71
'QRegion', # 72
'QBitmap', # 73
'QCursor', # 74
]
qdumpHelper_QVariants_F = [
# Qt 5. In Qt 4 add one.
'QKeySequence',# 75
'QPen', # 76
'QTextLength', # 77
'QTextFormat', # 78
'X',
'QTransform', # 80
'QMatrix4x4', # 81
'QVector2D', # 82
'QVector3D', # 83
'QVector4D', # 84
'QQuaternion', # 85
'QPolygonF' # 86
]
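# Note (illustrative, not part of the dumper logic): the tables above follow
# QMetaType's type ids: 0-6 are trivial core types handled inline (..._A),
# 7-28 core classes (..._B), 31-38 extended numeric types (..._D), 64-74 GUI
# classes (..._E) and 75-86 further GUI classes (..._F, shifted by one in
# Qt 4). Ids above 86 are treated as user types by qdump__QVariant below.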
def qdump__QVariant(d, value):
(data, typeStuff) = d.split('8sI', value)
variantType = typeStuff & 0x3fffffff
isShared = bool(typeStuff & 0x40000000)
# Well-known simple type.
if variantType <= 6:
qdumpHelper_QVariants_A[variantType](d, value)
d.putNumChild(0)
return None
# Extended Core type (Qt 5)
if variantType >= 31 and variantType <= 38 and d.qtVersion() >= 0x050000:
qdumpHelper_QVariants_D[variantType - 31](d, value)
d.putNumChild(0)
return None
# Extended Core type (Qt 4)
if variantType >= 128 and variantType <= 135 and d.qtVersion() < 0x050000:
if variantType == 128:
d.putBetterType('%sQVariant (void *)' % d.qtNamespace())
d.putValue('0x%x' % value.extractPointer())
else:
if variantType == 135: # Float
blob = value
else:
p = d.extractPointer(value)
blob = d.extractUInt64(p)
qdumpHelper_QVariants_D[variantType - 128](d, blob)
d.putNumChild(0)
return None
#warn('TYPE: %s' % variantType)
if variantType <= 86:
# Known Core or Gui type.
if variantType <= 28:
innert = qdumpHelper_QVariants_B[variantType - 7]
elif variantType <= 74:
innert = qdumpHelper_QVariants_E[variantType - 64]
elif d.qtVersion() < 0x050000:
innert = qdumpHelper_QVariants_F[variantType - 76]
else:
innert = qdumpHelper_QVariants_F[variantType - 75]
#data = value['d']['data']
innerType = d.qtNamespace() + innert
#warn('SHARED: %s' % isShared)
if isShared:
base1 = d.extractPointer(value)
#warn('BASE 1: %s %s' % (base1, innert))
base = d.extractPointer(base1)
#warn('SIZE 1: %s' % size)
val = d.createValue(base, innerType)
else:
#warn('DIRECT ITEM 1: %s' % innerType)
val = d.createValue(data, innerType)
val.laddress = value.laddress
d.putEmptyValue(-99)
d.putItem(val)
d.putBetterType('%sQVariant (%s)' % (d.qtNamespace(), innert))
return innert
# User types.
ns = d.qtNamespace()
d.putEmptyValue(-99)
d.putType('%sQVariant (%s)' % (ns, variantType))
d.putNumChild(1)
if d.isExpanded():
innerType = None
with Children(d):
ev = d.parseAndEvaluate
p = None
if p is None:
# Without debug info.
symbol = d.mangleName(d.qtNamespace() + 'QMetaType::typeName') + 'i'
p = ev('((const char *(*)(int))%s)(%d)' % (symbol, variantType))
#if p is None:
# p = ev('((const char *(*)(int))%sQMetaType::typeName)(%d)' % (ns, variantType))
if p is None:
# LLDB on Linux
p = ev('((const char *(*)(int))QMetaType::typeName)(%d)' % variantType)
if p is None:
d.putSpecialValue('notcallable')
return None
ptr = p.pointer()
(elided, blob) = d.encodeCArray(ptr, 1, 100)
innerType = d.hexdecode(blob)
# Prefer namespaced version.
if len(ns) > 0:
if not d.lookupNativeType(ns + innerType) is None:
innerType = ns + innerType
if isShared:
base1 = d.extractPointer(value)
base = d.extractPointer(base1)
val = d.createValue(base, innerType)
else:
val = d.createValue(data, innerType)
val.laddress = value.laddress
d.putSubItem('data', val)
if not innerType is None:
d.putBetterType('%sQVariant (%s)' % (ns, innerType))
return None
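# Note (illustrative, not part of the dumper logic): for user types the dumper
# cannot know the stored type statically, so it evaluates
# QMetaType::typeName(typeId) inside the debugged process (via the mangled
# symbol, or directly under LLDB) and re-reads the payload with the returned
# type name, preferring the namespaced variant when it resolves.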
def qedit__QVector(d, value, data):
values = data.split(',')
d.call('void', value, 'resize', str(len(values)))
base, vsize, valloc = d.vectorDataHelper(d.extractPointer(value))
d.setValues(base, value.type[0].name, values)
def qform__QVector():
return arrayForms()
def qdump__QVector(d, value):
dd = d.extractPointer(value)
data, size, alloc = d.vectorDataHelper(dd)
d.check(0 <= size and size <= alloc and alloc <= 1000 * 1000 * 1000)
d.putItemCount(size)
d.putPlotData(data, size, value.type[0])
if False:
def qdump__QObjectConnectionList(d, value):
dd = d.extractPointer(value)
data, size, alloc = d.vectorDataHelper(dd)
d.check(0 <= size and size <= alloc and alloc <= 1000 * 1000 * 1000)
d.putItemCount(size)
d.putPlotData(data, size, d.createType('@QObjectPrivate::ConnectionList'))
def qdump__QVarLengthArray(d, value):
(cap, size, data) = value.split('iip')
d.check(0 <= size)
d.putItemCount(size)
d.putPlotData(data, size, value.type[0])
def qdump__QSharedPointer(d, value):
qdump_QWeakPointerHelper(d, value, False)
def qdump__QWeakPointer(d, value):
qdump_QWeakPointerHelper(d, value, True)
def qdump__QPointer(d, value):
# actually, we'd use value['wp'] instead of value, but since we
# only split() on the result and the (sub-)object address is the
# same it does not matter but saves some cycles.
qdump_QWeakPointerHelper(d, value, True, value.type[0])
def qdump_QWeakPointerHelper(d, value, isWeak, innerType = None):
if isWeak:
(d_ptr, val) = value.split('pp')
else:
(val, d_ptr) = value.split('pp')
if d_ptr == 0 and val == 0:
d.putValue('(null)')
d.putNumChild(0)
return
if d_ptr == 0 or val == 0:
d.putValue('<invalid>')
d.putNumChild(0)
return
if d.qtVersion() >= 0x050000:
(weakref, strongref) = d.split('ii', d_ptr)
else:
(vptr, weakref, strongref) = d.split('pii', d_ptr)
d.check(strongref >= -1)
d.check(strongref <= weakref)
d.check(weakref <= 10*1000*1000)
if innerType is None:
innerType = value.type[0]
with Children(d):
short = d.putSubItem('data', d.createValue(val, innerType))
d.putIntItem('weakref', weakref)
d.putIntItem('strongref', strongref)
d.putValue(short.value, short.encoding)
def qdump__QXmlAttributes__Attribute(d, value):
d.putEmptyValue()
d.putNumChild(1)
if d.isExpanded():
with Children(d):
(qname, uri, localname, val) = value.split('{QString}' * 4)
d.putSubItem('qname', qname)
d.putSubItem('uri', uri)
d.putSubItem('localname', localname)
d.putSubItem('value', val)
def qdump__QXmlAttributes(d, value):
(vptr, atts) = value.split('pP')
innerType = d.createType(d.qtNamespace() + 'QXmlAttributes::Attribute', 4 * d.ptrSize())
val = d.createListItem(atts, innerType)
qdumpHelper_QList(d, val, innerType)
def qdump__QXmlStreamStringRef(d, value):
s = value['m_string']
(data, size, alloc) = d.stringData(s)
data += 2 * int(value['m_position'])
size = int(value['m_size'])
s = d.readMemory(data, 2 * size)
d.putValue(s, 'utf16')
d.putPlainChildren(value)
def qdump__QXmlStreamAttribute(d, value):
s = value['m_name']['m_string']
(data, size, alloc) = d.stringData(s)
data += 2 * int(value['m_name']['m_position'])
size = int(value['m_name']['m_size'])
s = d.readMemory(data, 2 * size)
d.putValue(s, 'utf16')
d.putPlainChildren(value)
#######################################################################
#
# V4
#
#######################################################################
def extractQmlData(d, value):
#if value.type.code == TypeCodePointer:
# value = value.dereference()
base = value.split('p')[0]
#mmdata = d.split('Q', base)[0]
#PointerMask = 0xfffffffffffffffd
#vtable = mmdata & PointerMask
#warn('QML DATA: %s' % value.stringify())
#data = value['data']
#return #data.cast(d.lookupType(value.type.name.replace('QV4::', 'QV4::Heap::')))
typeName = value.type.name.replace('QV4::', 'QV4::Heap::')
#warn('TYOE DATA: %s' % typeName)
return d.createValue(base, typeName)
def qdump__QV4__Heap__Base(d, value):
mm_data = value.extractPointer()
d.putValue('[%s]' % mm_data)
if d.isExpanded():
with Children(d):
with SubItem(d, 'vtable'):
d.putItem(d.createValue(mm_data & (~3), d.qtNamespace() + 'QV4::VTable'))
d.putBoolItem('isMarked', mm_data & 1)
d.putBoolItem('inUse', (mm_data & 2) == 0)
with SubItem(d, 'nextFree'):
d.putItem(d.createValue(mm_data & (~3), value.type))
def qdump__QV4__Heap__String(d, value):
# Note: There's also the 'Identifier' case. And the largestSubLength != 0 case.
(baseClass, textOrLeft, idOrRight, subtype, stringHash, largestSub, length, mm) \
= value.split('QppIIIIp')
textPtr = d.split('{QStringDataPtr}', textOrLeft)[0]
qdump__QStringData(d, d.createValue(textOrLeft, d.qtNamespace() + 'QStringData'))
if d.isExpanded():
with Children(d):
d.putFields(value)
def qmlPutHeapChildren(d, value):
d.putItem(extractQmlData(d, value))
def qdump__QV4__Object(d, value):
qmlPutHeapChildren(d, value)
def qdump__QV4__FunctionObject(d, value):
#qmlPutHeapChildren(d, value)
d.putEmptyValue()
if d.isExpanded():
with Children(d):
d.putFields(value)
d.putSubItem('heap', extractQmlData(d, value))
d.putCallItem('sourceLocation', '@QQmlSourceLocation',
value, 'sourceLocation')
def qdump__QV4__CompilationUnit(d, value):
qmlPutHeapChildren(d, value)
def qdump__QV4__CallContext(d, value):
qmlPutHeapChildren(d, value)
def qdump__QV4__ScriptFunction(d, value):
qmlPutHeapChildren(d, value)
def qdump__QV4__SimpleScriptFunction(d, value):
qdump__QV4__FunctionObject(d, value)
def qdump__QV4__ExecutionContext(d, value):
qmlPutHeapChildren(d, value)
def qdump__QQmlSourceLocation(d, value):
(sourceFile, line, col) = value.split('pHH')
(data, size, alloc) = d.stringData(value)
d.putValue(d.readMemory(data, 2 * size), 'utf16')
d.putField('valuesuffix', ':%s:%s' % (line, col))
d.putPlainChildren(value)
#def qdump__QV4__CallData(d, value):
# argc = value['argc'].integer()
# d.putItemCount(argc)
# if d.isExpanded():
# with Children(d):
# d.putSubItem('[this]', value['thisObject'])
# for i in range(0, argc):
# d.putSubItem(i, value['args'][i])
#
def qdump__QV4__String(d, value):
qmlPutHeapChildren(d, value)
def qdump__QV4__Identifier(d, value):
d.putStringValue(value)
d.putPlainChildren(value)
def qdump__QV4__PropertyHash(d, value):
data = value.extractPointer()
(ref, alloc, size, numBits, entries) = d.split('iiiip', data)
n = 0
innerType = d.qtNamespace() + 'QV4::Identifier'
with Children(d):
for i in range(alloc):
(identifier, index) = d.split('pI', entries + i * 2 * d.ptrSize())
if identifier != 0:
n += 1
with SubItem(d):
d.putItem(d.createValue(identifier, innerType))
d.put('keysuffix', ' %d' % index)
d.putItemCount(n)
d.putPlainChildren(value)
def qdump__QV4__InternalClass__Transition(d, value):
identifier = d.createValue(value.extractPointer(), d.qtNamespace() + 'QV4::Identifier')
d.putStringValue(identifier)
d.putPlainChildren(value)
def qdump__QV4__InternalClassTransition(d, value):
qdump__QV4__InternalClass__Transition(d, value)
def qdump__QV4__SharedInternalClassData(d, value):
(ref, alloc, size, pad, data) = value.split('iIIip')
val = d.createValue(data, value.type[0])
with Children(d):
with SubItem(d, 'data'):
d.putItem(val)
short = d.currentValue
d.putIntItem('size', size)
d.putIntItem('alloc', alloc)
d.putIntItem('refcount', ref)
d.putValue(short.value, short.encoding)
def qdump__QV4__IdentifierTable(d, value):
(engine, alloc, size, numBits, pad, entries) = value.split('piiiip')
n = 0
innerType = d.qtNamespace() + 'QV4::Heap::String'
with Children(d):
for i in range(alloc):
identifierPtr = d.extractPointer(entries + i * d.ptrSize())
if identifierPtr != 0:
n += 1
with SubItem(d, None):
d.putItem(d.createValue(identifierPtr, innerType))
d.putItemCount(n)
d.putPlainChildren(value)
if False:
# 32 bit.
QV4_Masks_SilentNaNBit = 0x00040000
QV4_Masks_NaN_Mask = 0x7ff80000
QV4_Masks_NotDouble_Mask = 0x7ffa0000
QV4_Masks_Type_Mask = 0xffffc000
QV4_Masks_Immediate_Mask = QV4_Masks_NotDouble_Mask | 0x00004000 | QV4_Masks_SilentNaNBit
QV4_Masks_IsNullOrUndefined_Mask = QV4_Masks_Immediate_Mask | 0x08000
QV4_Masks_Tag_Shift = 32
QV4_ValueType_Undefined_Type = QV4_Masks_Immediate_Mask | 0x00000
QV4_ValueType_Null_Type = QV4_Masks_Immediate_Mask | 0x10000
QV4_ValueType_Boolean_Type = QV4_Masks_Immediate_Mask | 0x08000
QV4_ValueType_Integer_Type = QV4_Masks_Immediate_Mask | 0x18000
QV4_ValueType_Managed_Type = QV4_Masks_NotDouble_Mask | 0x00000 | QV4_Masks_SilentNaNBit
QV4_ValueType_Empty_Type = QV4_Masks_NotDouble_Mask | 0x18000 | QV4_Masks_SilentNaNBit
QV4_ConvertibleToInt = QV4_Masks_Immediate_Mask | 0x1
QV4_ValueTypeInternal_Null_Type_Internal = QV4_ValueType_Null_Type | QV4_ConvertibleToInt
QV4_ValueTypeInternal_Boolean_Type_Internal = QV4_ValueType_Boolean_Type | QV4_ConvertibleToInt
QV4_ValueTypeInternal_Integer_Type_Internal = QV4_ValueType_Integer_Type | QV4_ConvertibleToInt
def QV4_getValue(d, jsval): # (Dumper, QJSValue *jsval) -> QV4::Value *
dd = d.split('Q', jsval)[0]
if dd & 3:
return 0
return dd
def QV4_getVariant(d, jsval): # (Dumper, QJSValue *jsval) -> QVariant *
dd = d.split('Q', jsval)[0]
if dd & 1:
return dd & ~3
return 0
def QV4_valueForData(d, jsval): # (Dumper, QJSValue *jsval) -> QV4::Value *
v = QV4_getValue(d, jsval)
if v:
return v
warn('Not implemented: VARIANT')
return 0
def QV4_putObjectValue(d, objectPtr):
ns = d.qtNamespace()
base = d.extractPointer(objectPtr)
(inlineMemberOffset, inlineMemberSize, internalClass, prototype,
memberData, arrayData) = d.split('IIpppp', base)
d.putValue('PTR: 0x%x' % objectPtr)
if d.isExpanded():
with Children(d):
with SubItem(d, '[raw]'):
d.putValue('[0x%x]' % objectPtr)
d.putType(' ');
d.putNumChild(0)
d.putIntItem('inlineMemberOffset', inlineMemberOffset)
d.putIntItem('inlineMemberSize', inlineMemberSize)
d.putIntItem('internalClass', internalClass)
d.putIntItem('prototype', prototype)
d.putPtrItem('memberData', memberData)
d.putPtrItem('arrayData', arrayData)
d.putSubItem('OBJ', d.createValue(objectPtr, ns + 'QV4::Object'))
#d.putFields(value)
def qdump__QV4_Object(d, value):
ns = d.qtNamespace()
d.putEmptyValue()
if d.isExpanded():
with Children(d):
with SubItem(d, '[raw]'):
base = d.extractPointer(objectPtr)
(inlineMemberOffset, inlineMemberSize, internalClass, prototype,
memberData, arrayData) = d.split('IIpppp', base)
d.putValue('PTR: 0x%x' % objectPtr)
def qdump__QV4__Value(d, value):
if d.ptrSize() == 4:
qdump_32__QV4__Value(d, value)
else:
qdump_64__QV4__Value(d, value)
def qdump_32__QV4__Value(d, value):
# QV4_Masks_SilentNaNBit = 0x00040000
# QV4_Masks_NaN_Mask = 0x7ff80000
# QV4_Masks_NotDouble_Mask = 0x7ffa0000
# QV4_Masks_Type_Mask = 0xffffc000
ns = d.qtNamespace()
v = value.split('Q')[0]
tag = v >> 32
val = v & 0xffffffff
if (tag & 0x7fff2000) == 0x7fff2000: # Int
d.putValue(val)
d.putBetterType('%sQV4::Value (int32)' % ns)
elif (tag & 0x7fff4000) == 0x7fff4000: # Bool
d.putValue(val)
d.putBetterType('%sQV4::Value (bool)' % ns)
elif (tag & 0x7fff0000) == 0x7fff0000: # Null
d.putValue(val)
d.putBetterType('%sQV4::Value (null)' % ns)
elif (tag & 0x7ffa0000) != 0x7ffa0000: # Double
d.putValue(value.split('d')[0])
d.putBetterType('%sQV4::Value (double)' % ns)
elif tag == 0x7ffa0000:
if val == 0:
d.putValue('(undefined)')
d.putBetterType('%sQV4::Value (undefined)' % ns)
else:
managed = d.createValue(val, ns + 'QV4::Heap::Base')
qdump__QV4__Heap__Base(d, managed)
#d.putValue('[0x%x]' % v)
#d.putPlainChildren(value)
if d.isExpanded():
with Children(d):
with SubItem(d, '[raw]'):
d.putValue('[0x%x]' % v)
d.putType(' ');
d.putNumChild(0)
with SubItem(d, '[val]'):
d.putValue('[0x%x]' % val)
d.putType(' ');
d.putNumChild(0)
with SubItem(d, '[tag]'):
d.putValue('[0x%x]' % tag)
d.putType(' ');
d.putNumChild(0)
#with SubItem(d, '[vtable]'):
# d.putItem(d.createValue(vtable, ns + 'QV4::VTable'))
# d.putType(' ');
# d.putNumChild(0)
d.putFields(value)
def qdump_64__QV4__Value(d, value):
dti = d.qtDeclarativeTypeInfoVersion()
new = dti is not None and dti >= 2
if new:
QV4_NaNEncodeMask = 0xfffc000000000000
QV4_Masks_Immediate_Mask = 0x00020000 # bit 49
QV4_ValueTypeInternal_Empty_Type_Internal = QV4_Masks_Immediate_Mask | 0
QV4_ConvertibleToInt = QV4_Masks_Immediate_Mask | 0x10000 # bit 48
QV4_ValueTypeInternal_Null_Type_Internal = QV4_ConvertibleToInt | 0x08000
QV4_ValueTypeInternal_Boolean_Type_Internal = QV4_ConvertibleToInt | 0x04000
QV4_ValueTypeInternal_Integer_Type_Internal = QV4_ConvertibleToInt | 0x02000
QV4_ValueType_Undefined_Type = 0 # Dummy to make generic code below pass.
else:
QV4_NaNEncodeMask = 0xffff800000000000
QV4_Masks_Immediate_Mask = 0x00018000
QV4_IsInt32Mask = 0x0002000000000000
QV4_IsDoubleMask = 0xfffc000000000000
QV4_IsNumberMask = QV4_IsInt32Mask | QV4_IsDoubleMask
QV4_IsNullOrUndefinedMask = 0x0000800000000000
QV4_IsNullOrBooleanMask = 0x0001000000000000
QV4_Masks_NaN_Mask = 0x7ff80000
QV4_Masks_Type_Mask = 0xffff8000
QV4_Masks_IsDouble_Mask = 0xfffc0000
QV4_Masks_IsNullOrUndefined_Mask = 0x00008000
QV4_Masks_IsNullOrBoolean_Mask = 0x00010000
QV4_ValueType_Undefined_Type = QV4_Masks_IsNullOrUndefined_Mask
QV4_ValueType_Null_Type = QV4_Masks_IsNullOrUndefined_Mask \
| QV4_Masks_IsNullOrBoolean_Mask
QV4_ValueType_Boolean_Type = QV4_Masks_IsNullOrBoolean_Mask
QV4_ValueType_Integer_Type = 0x20000 | QV4_Masks_IsNullOrBoolean_Mask
QV4_ValueType_Managed_Type = 0
QV4_ValueType_Empty_Type = QV4_ValueType_Undefined_Type | 0x4000
QV4_ValueTypeInternal_Null_Type_Internal = QV4_ValueType_Null_Type
QV4_ValueTypeInternal_Boolean_Type_Internal = QV4_ValueType_Boolean_Type
QV4_ValueTypeInternal_Integer_Type_Internal = QV4_ValueType_Integer_Type
QV4_PointerMask = 0xfffffffffffffffd
QV4_Masks_Tag_Shift = 32
QV4_IsDouble_Shift = 64-14
QV4_IsNumber_Shift = 64-15
QV4_IsConvertibleToInt_Shift = 64-16
QV4_IsManaged_Shift = 64-17
v = value.split('Q')[0]
tag = v >> QV4_Masks_Tag_Shift
vtable = v & QV4_PointerMask
ns = d.qtNamespace()
if (v >> QV4_IsNumber_Shift) == 1:
d.putBetterType('%sQV4::Value (int32)' % ns)
vv = v & 0xffffffff
vv = vv if vv < 0x80000000 else -(0x100000000 - vv)
d.putBetterType('%sQV4::Value (int32)' % ns)
d.putValue('%d' % vv)
elif (v >> QV4_IsDouble_Shift):
d.putBetterType('%sQV4::Value (double)' % ns)
d.putValue('%x' % (v ^ QV4_NaNEncodeMask), 'float:8')
elif tag == QV4_ValueType_Undefined_Type and not new:
d.putBetterType('%sQV4::Value (undefined)' % ns)
d.putValue('(undefined)')
elif tag == QV4_ValueTypeInternal_Null_Type_Internal:
d.putBetterType('%sQV4::Value (null?)' % ns)
d.putValue('(null?)')
elif v == 0:
if new:
d.putBetterType('%sQV4::Value (undefined)' % ns)
d.putValue('(undefined)')
else:
d.putBetterType('%sQV4::Value (null)' % ns)
d.putValue('(null)')
#elif ((v >> QV4_IsManaged_Shift) & ~1) == 1:
# d.putBetterType('%sQV4::Value (null/undef)' % ns)
# d.putValue('(null/undef)')
#elif v & QV4_IsNullOrBooleanMask:
# d.putBetterType('%sQV4::Value (null/bool)' % ns)
# d.putValue('(null/bool)')
# d.putValue(v & 1)
else:
(parentv, flags, pad, className) = d.split('pIIp', vtable)
#vtable = value['m']['vtable']
if flags & 2: # isString
d.putBetterType('%sQV4::Value (string)' % ns)
qdump__QV4__Heap__String(d, d.createValue(v, ns + 'QV4::Heap::String'))
#d.putStringValue(d.extractPointer(value) + 2 * d.ptrSize())
#d.putValue('ptr: 0x%x' % d.extractPointer(value))
return
elif flags & 4: # isObject
d.putBetterType('%sQV4::Value (object)' % ns)
#QV4_putObjectValue(d, d.extractPointer(value) + 2 * d.ptrSize())
arrayVTable = d.symbolAddress(ns + 'QV4::ArrayObject::static_vtbl')
#warn('ARRAY VTABLE: 0x%x' % arrayVTable)
d.putNumChild(1)
d.putItem(d.createValue(d.extractPointer(value) + 2 * d.ptrSize(), ns + 'QV4::Object'))
return
elif flags & 8: # isFunction
d.putBetterType('%sQV4::Value (function)' % ns)
d.putEmptyValue()
else:
d.putBetterType('%sQV4::Value (unknown)' % ns)
#d.putValue('[0x%x]' % v)
d.putValue('[0x%x : flag 0x%x : tag 0x%x]' % (v, flags, tag))
if d.isExpanded():
with Children(d):
with SubItem(d, '[raw]'):
d.putValue('[0x%x]' % v)
d.putType(' ');
d.putNumChild(0)
with SubItem(d, '[vtable]'):
d.putItem(d.createValue(vtable, ns + 'QV4::VTable'))
d.putType(' ');
d.putNumChild(0)
d.putFields(value)
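# Note (illustrative, not part of the dumper logic): 64-bit QV4::Value is
# NaN-boxed: doubles are stored XOR-ed with QV4_NaNEncodeMask, while integers,
# booleans, null/undefined and heap pointers are told apart by tag bits placed
# in the otherwise unused quiet-NaN space. The exact layout changed with the
# declarative type-info version, hence the two sets of constants above.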
def qdump__QV__PropertyHashData(d, value):
(ref, alloc, size, numBits, entries) = value.split('IIIIp')
d.putItemCount(size)
if d.isExpanded():
with Children(d):
d.putFields(value)
def qdump__QV__PropertyHash(d, value):
qdump__QV__PropertyHashData(d, d.createValue(d.extractPointer(), value.type.name + 'Data'))
def qdump__QV4__Scoped(d, value):
innerType = value.type[0]
d.putItem(d.createValue(value.extractPointer(), innerType))
#d.putEmptyValue()
#if d.isExpanded():
# with Children(d):
# d.putSubItem('[]', d.createValue(value.extractPointer(), innerType))
# d.putFields(value)
def qdump__QV4__ScopedString(d, value):
innerType = value.type[0]
qdump__QV4__String(d, d.createValue(value.extractPointer(), innerType))
def qdump__QJSValue(d, value):
if d.ptrSize() == 4:
qdump_32__QJSValue(d, value)
else:
qdump_64__QJSValue(d, value)
def qdump_32__QJSValue(d, value):
ns = d.qtNamespace()
dd = value.split('I')[0]
d.putValue('[0x%x]' % dd)
if dd == 0:
d.putValue('(null)')
d.putType(value.type.name + ' (null)')
elif dd & 1:
variant = d.createValue(dd & ~3, ns + 'QVariant')
qdump__QVariant(d, variant)
d.putBetterType(d.currentType.value.replace('QVariant', 'QJSValue', 1))
elif dd & 3 == 0:
v4value = d.createValue(dd, ns + 'QV4::Value')
qdump_32__QV4__Value(d, v4value)
d.putBetterType(d.currentType.value.replace('QV4::Value', 'QJSValue', 1))
return
if d.isExpanded():
with Children(d):
with SubItem(d, '[raw]'):
d.putValue('[0x%x]' % dd)
d.putType(' ');
d.putNumChild(0)
d.putFields(value)
def qdump_64__QJSValue(d, value):
ns = d.qtNamespace()
dd = value.split('Q')[0]
if dd == 0:
d.putValue('(null)')
d.putType(value.type.name + ' (null)')
elif dd & 1:
variant = d.createValue(dd & ~3, ns + 'QVariant')
qdump__QVariant(d, variant)
d.putBetterType(d.currentType.value.replace('QVariant', 'QJSValue', 1))
else:
d.putEmptyValue()
#qdump__QV4__Value(d, d.createValue(dd, ns + 'QV4::Value'))
#return
if d.isExpanded():
with Children(d):
with SubItem(d, '[raw]'):
d.putValue('[0x%x]' % dd)
d.putType(' ');
d.putNumChild(0)
d.putFields(value)
def qdump__QQmlBinding(d, value):
d.putEmptyValue()
if d.isExpanded():
with Children(d):
d.putCallItem('expressionIdentifier', '@QString',
value, 'expressionIdentifier')
d.putFields(value)
#######################################################################
#
# Webkit
#
#######################################################################
def jstagAsString(tag):
# enum { Int32Tag = 0xffffffff };
# enum { CellTag = 0xfffffffe };
# enum { TrueTag = 0xfffffffd };
# enum { FalseTag = 0xfffffffc };
# enum { NullTag = 0xfffffffb };
# enum { UndefinedTag = 0xfffffffa };
# enum { EmptyValueTag = 0xfffffff9 };
# enum { DeletedValueTag = 0xfffffff8 };
if tag == -1:
return 'Int32'
if tag == -2:
return 'Cell'
if tag == -3:
return 'True'
if tag == -4:
return 'False'
if tag == -5:
return 'Null'
if tag == -6:
return 'Undefined'
if tag == -7:
return 'Empty'
if tag == -8:
return 'Deleted'
return 'Unknown'
def qdump__QTJSC__JSValue(d, value):
d.putEmptyValue()
d.putNumChild(1)
if d.isExpanded():
with Children(d):
tag = value['u']['asBits']['tag']
payload = value['u']['asBits']['payload']
#d.putIntItem('tag', tag)
with SubItem(d, 'tag'):
d.putValue(jstagAsString(int(tag)))
d.putNoType()
d.putNumChild(0)
d.putIntItem('payload', int(payload))
d.putFields(value['u'])
if tag == -2:
cellType = d.lookupType('QTJSC::JSCell').pointer()
d.putSubItem('cell', payload.cast(cellType))
try:
# FIXME: This might not always be a variant.
delegateType = d.lookupType(d.qtNamespace() + 'QScript::QVariantDelegate').pointer()
delegate = scriptObject['d']['delegate'].cast(delegateType)
#d.putSubItem('delegate', delegate)
variant = delegate['m_value']
d.putSubItem('variant', variant)
except:
pass
def qdump__QScriptValue(d, value):
# structure:
# engine QScriptEnginePrivate
# jscValue QTJSC::JSValue
# next QScriptValuePrivate *
# numberValue 5.5987310416280426e-270 myns::qsreal
# prev QScriptValuePrivate *
# ref QBasicAtomicInt
# stringValue QString
# type QScriptValuePrivate::Type: { JavaScriptCore, Number, String }
#d.putEmptyValue()
dd = value['d_ptr']['d']
ns = d.qtNamespace()
if dd.pointer() == 0:
d.putValue('(invalid)')
d.putNumChild(0)
return
if int(dd['type']) == 1: # Number
d.putValue(dd['numberValue'])
d.putType('%sQScriptValue (Number)' % ns)
d.putNumChild(0)
return
if int(dd['type']) == 2: # String
d.putStringValue(dd['stringValue'])
d.putType('%sQScriptValue (String)' % ns)
return
d.putType('%sQScriptValue (JSCoreValue)' % ns)
x = dd['jscValue']['u']
tag = x['asBits']['tag']
payload = x['asBits']['payload']
#isValid = int(x['asBits']['tag']) != -6 # Empty
#isCell = int(x['asBits']['tag']) == -2
#warn('IS CELL: %s ' % isCell)
#isObject = False
#className = 'UNKNOWN NAME'
#if isCell:
# # isCell() && asCell()->isObject();
# # in cell: m_structure->typeInfo().type() == ObjectType;
# cellType = d.lookupType('QTJSC::JSCell').pointer()
# cell = payload.cast(cellType).dereference()
# dtype = 'NO DYNAMIC TYPE'
# try:
# dtype = cell.dynamic_type
# except:
# pass
# warn('DYNAMIC TYPE: %s' % dtype)
# warn('STATUC %s' % cell.type)
# type = cell['m_structure']['m_typeInfo']['m_type']
# isObject = int(type) == 7 # ObjectType;
# className = 'UNKNOWN NAME'
#warn('IS OBJECT: %s ' % isObject)
#inline bool JSCell::inherits(const ClassInfo* info) const
#for (const ClassInfo* ci = classInfo(); ci; ci = ci->parentClass) {
# if (ci == info)
# return true;
#return false;
try:
# This might already fail for 'native' payloads.
scriptObjectType = d.lookupType(ns + 'QScriptObject').pointer()
scriptObject = payload.cast(scriptObjectType)
# FIXME: This might not always be a variant.
delegateType = d.lookupType(ns + 'QScript::QVariantDelegate').pointer()
delegate = scriptObject['d']['delegate'].cast(delegateType)
#d.putSubItem('delegate', delegate)
variant = delegate['m_value']
#d.putSubItem('variant', variant)
t = qdump__QVariant(d, variant)
# Override the 'QVariant (foo)' output
d.putBetterType('%sQScriptValue (%s)' % (ns, t))
if t != 'JSCoreValue':
return
except:
pass
# This is a 'native' JSCore type for e.g. QDateTime.
d.putValue('<native>')
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putSubItem('jscValue', dd['jscValue'])
def qdump__QQmlAccessorProperties__Properties(d, value):
size = int(value['count'])
d.putItemCount(size)
if d.isExpanded():
d.putArrayData(value['properties'], size)
#
# QJson
#
def qdumpHelper_qle_cutBits(value, offset, length):
return (value >> offset) & ((1 << length) - 1)
def qdump__QJsonPrivate__qle_bitfield(d, value):
offset = value.type[0]
length = value.type[1]
val = value['val'].integer()
d.putValue('%s' % qdumpHelper_qle_cutBits(val, offset, length))
d.putNumChild(0)
def qdumpHelper_qle_signedbitfield_value(d, value):
offset = value.type[0]
length = value.type[1]
val = value['val'].integer()
val = (val >> offset) & ((1 << length) - 1)
if val >= (1 << (length - 1)):
val -= (1 << (length - 1))
return val
def qdump__QJsonPrivate__qle_signedbitfield(d, value):
d.putValue('%s' % qdumpHelper_qle_signedbitfield_value(d, value))
d.putNumChild(0)
def qdump__QJsonPrivate__q_littleendian(d, value):
d.putValue('%s' % value['val'].integer())
d.putNumChild(0)
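# Note (illustrative, not part of the dumper logic): the binary QJson format
# packs several fields into single little-endian words, and
# qdumpHelper_qle_cutBits() extracts one such bitfield. In a QJsonPrivate::Value
# word, bits 0-2 hold the type, bit 3 the latin1/int flag and bits 5-31 the
# payload, as decoded in qdumpHelper_QJsonValue() below. Minimal sketch with a
# hypothetical packed word (guarded, never executed here):
if False:
    def _demo_qle_cutBits():
        pv = (42 << 5) | (1 << 3) | 2                        # type 2 = Number
        assert qdumpHelper_qle_cutBits(pv, 0, 3) == 2        # type
        assert qdumpHelper_qle_cutBits(pv, 3, 1) == 1        # stored as int
        assert qdumpHelper_qle_cutBits(pv, 5, 27) == 42      # payload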
def qdumpHelper_QJsonValue(d, data, base, pv):
"""
Parameters are the parameters to the
QJsonValue(QJsonPrivate::Data *data, QJsonPrivate::Base *base,
const QJsonPrivate::Value& pv)
constructor. We 'inline' the construction here.
data is passed as pointer integer
base is passed as pointer integer
pv is passed as 32 bit integer.
"""
d.checkIntType(data)
d.checkIntType(base)
d.checkIntType(pv)
t = qdumpHelper_qle_cutBits(pv, 0, 3)
v = qdumpHelper_qle_cutBits(pv, 5, 27)
latinOrIntValue = qdumpHelper_qle_cutBits(pv, 3, 1)
if t == 0:
d.putType('QJsonValue (Null)')
d.putValue('Null')
d.putNumChild(0)
return
if t == 1:
d.putType('QJsonValue (Bool)')
d.putValue('true' if v else 'false')
d.putNumChild(0)
return
if t == 2:
d.putType('QJsonValue (Number)')
if latinOrIntValue:
w = toInteger(v)
if w >= 0x4000000:
w -= 0x8000000
d.putValue(w)
else:
data = base + v
f = d.split('d', data)[0]
d.putValue(str(f))
d.putNumChild(0)
return
if t == 3:
d.putType('QJsonValue (String)')
data = base + v;
if latinOrIntValue:
length = d.extractUShort(data)
d.putValue(d.readMemory(data + 2, length), 'latin1')
else:
length = d.extractUInt(data)
d.putValue(d.readMemory(data + 4, length * 2), 'utf16')
d.putNumChild(0)
return
if t == 4:
d.putType('QJsonValue (Array)')
qdumpHelper_QJsonArray(d, data, base + v)
return
if t == 5:
d.putType('QJsonValue (Object)')
qdumpHelper_QJsonObject(d, data, base + v)
d.putNumChild(0)
def qdumpHelper_QJsonArray(d, data, array):
"""
Parameters are the parameters to the
QJsonArray(QJsonPrivate::Data *data, QJsonPrivate::Array *array)
constructor. We 'inline' the construction here.
array is passed as integer pointer to the QJsonPrivate::Base object.
"""
if data:
# The 'length' part of the _dummy member:
n = qdumpHelper_qle_cutBits(d.extractUInt(array + 4), 1, 31)
else:
n = 0
d.putItemCount(n)
if d.isExpanded():
with Children(d, maxNumChild=1000):
table = array + d.extractUInt(array + 8)
for i in range(n):
with SubItem(d, i):
qdumpHelper_QJsonValue(d, data, array, d.extractUInt(table + 4 * i))
def qdumpHelper_QJsonObject(d, data, obj):
"""
Parameters are the parameters to the
QJsonObject(QJsonPrivate::Data *data, QJsonPrivate::Object *object);
constructor. We "inline" the construction here.
obj is passed as integer pointer to the QJsonPrivate::Base object.
"""
if data:
# The 'length' part of the _dummy member:
n = qdumpHelper_qle_cutBits(d.extractUInt(obj + 4), 1, 31)
else:
n = 0
d.putItemCount(n)
if d.isExpanded():
with Children(d, maxNumChild=1000):
table = obj + d.extractUInt(obj + 8)
for i in range(n):
with SubItem(d, i):
entryPtr = table + 4 * i # entryAt(i)
entryStart = obj + d.extractUInt(entryPtr) # Entry::value
keyStart = entryStart + 4 # sizeof(QJsonPrivate::Entry) == 4
val = d.extractInt(entryStart)
key = d.extractInt(keyStart)
isLatinKey = qdumpHelper_qle_cutBits(val, 4, 1)
if isLatinKey:
keyLength = d.extractUShort(keyStart)
d.putField('key', d.readMemory(keyStart + 2, keyLength))
d.putField('keyencoded', 'latin1')
else:
keyLength = d.extractUInt(keyStart)
d.putField('key', d.readMemory(keyStart + 4, keyLength))
d.putField('keyencoded', 'utf16')
qdumpHelper_QJsonValue(d, data, obj, val)
def qdump__QJsonValue(d, value):
(data, dd, t) = value.split('QpI')
if t == 0:
d.putType('QJsonValue (Null)')
d.putValue('Null')
d.putNumChild(0)
return
if t == 1:
d.putType('QJsonValue (Bool)')
v = value.split('b')[0]
d.putValue('true' if v else 'false')
d.putNumChild(0)
return
if t == 2:
d.putType('QJsonValue (Number)')
d.putValue(value.split('d')[0])
d.putNumChild(0)
return
if t == 3:
d.putType('QJsonValue (String)')
elided, base = d.encodeStringHelper(data, d.displayStringLimit)
d.putValue(base, 'utf16', elided=elided)
d.putNumChild(0)
return
if t == 4:
d.putType('QJsonValue (Array)')
qdumpHelper_QJsonArray(d, dd, data)
return
if t == 5:
d.putType('QJsonValue (Object)')
qdumpHelper_QJsonObject(d, dd, data)
return
d.putType('QJsonValue (Undefined)')
d.putEmptyValue()
d.putNumChild(0)
def qdump__QJsonArray(d, value):
qdumpHelper_QJsonArray(d, value['d'].pointer(), value['a'].pointer())
def qdump__QJsonObject(d, value):
qdumpHelper_QJsonObject(d, value['d'].pointer(), value['o'].pointer())
def qdump__QSqlResultPrivate(d, value):
# QSqlResult *q_ptr;
# QPointer<QSqlDriver> sqldriver;
# int idx;
# QString sql;
# bool active;
# bool isSel;
# QSqlError error;
# bool forwardOnly;
# QSql::NumericalPrecisionPolicy precisionPolicy;
# int bindCount;
# QSqlResult::BindingSyntax binds;
# QString executedQuery;
# QHash<int, QSql::ParamType> types;
# QVector<QVariant> values;
# QHash<QString, QList<int> > indexes;
# QVector<QHolder> holders
vptr, qptr, sqldriver1, sqldriver2, idx, pad, sql, active, isSel, pad, \
error1, error2, error3, \
forwardOnly, pad, precisionPolicy, bindCount, \
binds, executedQuery, types, values, indexes, holders = \
value.split('ppppi@{QString}bb@pppb@iiii{QString}ppp')
d.putStringValue(sql)
d.putPlainChildren(value)
def qdump__QSqlField(d, value):
val, dptr = value.split('{QVariant}p')
d.putNumChild(1)
qdump__QVariant(d, val)
d.putBetterType(d.currentType.value.replace('QVariant', 'QSqlField'))
d.putPlainChildren(value)
def qdump__QLazilyAllocated(d, value):
p = value.extractPointer()
if p == 0:
d.putValue("(null)")
d.putNumChild(0)
else:
d.putItem(d.createValue(p, value.type[0]))
d.putBetterType(value.type)
def qdump__qfloat16(d, value):
h = value.split('H')[0]
# Stole^H^H^HHeavily inspired by J.F. Sebastian at
# http://math.stackexchange.com/questions/1128204/how-to-convert-
# from-floating-point-binary-to-decimal-in-half-precision16-bits
sign = h >> 15
exp = (h >> 10) & 0b011111
fraction = h & (2**10 - 1)
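    # e.g. h = 0x3C00: sign=0, exp=15, fraction=0 -> (1 + 0) * 2**(15 - 15) = 1.0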
if exp == 0:
if fraction == 0:
res = -0.0 if sign else 0.0
else:
res = (-1)**sign * fraction / 2**10 * 2**(-14) # subnormal
elif exp == 0b11111:
res = ('-inf' if sign else 'inf') if fraction == 0 else 'nan'
else:
res = (-1)**sign * (1 + 1. * fraction / 2**10) * 2**(exp - 15)
d.putValue(res)
d.putNumChild(1)
d.putPlainChildren(value)
| gpl-3.0 | 5,603,281,900,807,769,000 | 33.923337 | 103 | 0.568351 | false |
gaspar0069/upodder | upodder/test/test_upodder.py | 1 | 1760 | import unittest
import shutil
from upodder import upodder
BASEDIR = '/tmp/upodder_testing'
class TestUpodder(unittest.TestCase):
feeds = [
"http://popupchinese.com/feeds/custom/sinica",
"http://www.radiolab.org/feeds/podcast/",
"http://99percentinvisible.org/feed/",
"http://chaosradio.ccc.de/chaosradio-latest.rss",
"http://djfm.ca/?feed=rss2",
"http://feeds.feedburner.com/Sebastien-bHouseFromIbiza/",
"http://alternativlos.org/ogg.rss",
"http://www.sovereignman.com/feed/",
"http://neusprech.org/feed/",
"http://www.davidbarrkirtley.com/podcast/geeksguideshow.xml",
"http://www.cbc.ca/cmlink/1.2919550",
"http://www.sciencefriday.com/feed/scifriall.xml",
"http://feeds.feedburner.com/binaergewitter-podcast-opus",
"http://lila-podcast.de/feed/opus/",
"http://podcastfeeds.nbcnews.com/audio/podcast/MSNBC-MADDOW-NETCAST-M4V.xml",
"http://feeds.feedburner.com/uhhyeahdude/podcast",
]
def setUp(self):
upodder.args.no_download = True
upodder.args.mark_seen = False
upodder.args.oldness = 720
upodder.args.basedir = BASEDIR
upodder.init()
def test_feedparsing(self):
for f in self.feeds:
upodder.process_feed(f)
def test_mark_seen(self):
upodder.args.mark_seen = True
for f in self.feeds:
upodder.process_feed(f)
self.assertGreater(upodder.SeenEntry.select().count(), 5)
    def tearDown(self):
        shutil.rmtree(BASEDIR)
if __name__ == '__main__':
unittest.main() | bsd-3-clause | -4,540,895,720,556,814,300 | 34.22 | 93 | 0.578977 | false |
dmpayton/django-fbi | django_fbi/views.py | 1 | 1651 | from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render_to_response
from django.views.decorators.cache import never_cache
from django.template import RequestContext, Template
from django.views.decorators.csrf import csrf_exempt
from django_fbi.app import apps
from django_fbi.backends import get_backend
from django_fbi.models import FacebookAccount, FacebookApp  # FacebookApp assumed to live in models; it is referenced by view_app below
from django_fbi.signals import facebook_deauthorize
FBI_BACKEND = getattr(settings, 'FBI_BACKEND', 'django_fbi.backends.DefaultBackend')
def channel(request):
return HttpResponse('<script src="//connect.facebook.net/en_US/all.js"></script>')
def connect(request):
facebook_backend = get_backend(FBI_BACKEND)
return facebook_backend(request).connect_view()
connect = never_cache(connect)
def deauthorize(request):
facebook_backend = get_backend(FBI_BACKEND)
return facebook_backend(request).deauthorize_view()
deauthorize = csrf_exempt(deauthorize)
deauthorize = never_cache(deauthorize)
def view_app(request, slug, page):
try:
## Check the registry to see if we have Python app.
app = apps[slug]
        return getattr(app, '%s_view' % page)(request)
    except (KeyError, NotImplementedError):
## Nothing registered, check the database.
app = get_object_or_404(FacebookApp, namespace=slug)
context = RequestContext(request, {'app': app})
page_template = getattr(app, '%s_template' % page)
if page_template:
return render_to_response(page_template, context)
page_content = getattr(app, '%s_content' % page)
return HttpResponse(Template(page_content).render(context))
| mit | 6,748,672,571,937,231,000 | 40.275 | 86 | 0.727438 | false |
dotKom/onlineweb4 | utils/helpers.py | 1 | 1794 | # -*- coding: utf8 -*-
import json
from datetime import date, datetime
import pytz
from django.conf import settings
from django.db import models
from django.db.models.query import QuerySet
from django.utils.timezone import make_aware
class JsonHelper(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.strftime("%d.%m.%Y %H.%M")
elif isinstance(obj, date):
return obj.strftime("%d.%m.%Y")
elif isinstance(obj, models.Model):
return obj.serializable_object()
elif isinstance(obj, QuerySet):
return list(obj)
return json.JSONEncoder.default(self, obj)
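# Typical usage (illustrative): json.dumps(payload, cls=JsonHelper) serializes
# datetimes, dates, model instances and querysets with the rules above.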
def humanize_size(size, suffix='B'):
"""
Converts an integer of bytes to a properly scaled human readable
string.
Example:
>>> humanize_size(15298253)
'14.6MB'
:param size: The size of the object in bytes as an integer
:return: A string of the formatted file size
"""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(size) < 1024.0:
return '%.1f%s%s' % (size, unit, suffix)
size /= 1024.0
    return '%.1f%s%s' % (size, 'Y', suffix)
def timezoneaware(dt, current_tz=settings.TIME_ZONE, is_dst=False):
"""
    Transforms a potentially naive datetime into a timezone aware datetime,
    by utilizing the timezone setting from settings.py
    :param dt: A naive datetime instance.
    :param current_tz: Timezone to localize to; a tzinfo instance or a timezone name string.
    :param is_dst: Boolean: Are we currently under daylight savings time (summertime)?
    :return: A timezone-aware datetime
    """
    # settings.TIME_ZONE is a name string, so coerce strings to a tzinfo object first
    tz = pytz.timezone(current_tz) if isinstance(current_tz, str) else current_tz
try:
aware_dt = make_aware(dt, timezone=tz)
except (pytz.NonExistentTimeError, pytz.AmbiguousTimeError):
aware_dt = tz.localize(dt, is_dst=is_dst)
return aware_dt
| mit | -845,939,318,202,271,400 | 26.6 | 86 | 0.636009 | false |
CSC-IT-Center-for-Science/chipster-job-manager | jobmanager/tests/test_jobs_db.py | 1 | 8528 | from __future__ import unicode_literals
import pytest
import datetime
from sqlalchemy import create_engine, event
from sqlalchemy.orm import sessionmaker
from jobmanager.models import Base, JobNotFound
from jobmanager.models import (add_job, get_job, get_jobs, get_next_from_queue, update_job_comp,
update_job_results, update_job_reply_to,
update_job_rescheduled, update_job_cancelled,
update_job_running, purge_completed_jobs)
class TestDB(object):
def setUp(self):
engine = create_engine('sqlite:///:memory:')
def _fk_pragma_on_connect(dbapi_con, con_record):
dbapi_con.execute('pragma foreign_keys=ON')
event.listen(engine, 'connect', _fk_pragma_on_connect)
Base.metadata.create_all(engine)
self.session = sessionmaker(bind=engine)()
def test_add_new_job(self):
add_job(self.session, "abc42", "Analysis Job", "{}", "session_id", "username")
jobs = [x for x in get_jobs(self.session)]
assert len(jobs) == 1
job = jobs[0]
assert job.job_id == 'abc42'
assert job.description == 'Analysis Job'
assert job.headers == '{}'
assert job.session_id == 'session_id'
def test_add_multiple_jobs(self):
add_job(self.session, "abc42", "analysis job", "{}", "session_id", "username")
jobs = [x for x in get_jobs(self.session)]
assert len(jobs) == 1
add_job(self.session, "abc43", "analysis Job ", "{}", "session_id", "username")
jobs = [x for x in get_jobs(self.session)]
assert len(jobs) == 2
def test_since_function(self):
add_job(self.session, "abc42", "Analysis Job", "{}", "session_id", "username")
for job in get_jobs(self.session):
assert job.seconds_since_created() >= 0
assert job.description == 'Analysis Job'
assert job.headers == '{}'
assert job.session_id == 'session_id'
def test_get_next_unsubmitted(self):
add_job(self.session, "abc42", "analysis job", "{}", "session_id", "username")
jobs = [x for x in get_jobs(self.session)]
assert len(jobs) == 1
add_job(self.session, "abc43", "analysis Job ", "{}", "session_id", "username")
jobs = [x for x in get_jobs(self.session)]
assert len(jobs) == 2
add_job(self.session, "abc44", "analysis Job ", "{}", "session_id", "username")
jobs = [x for x in get_jobs(self.session)]
assert len(jobs) == 3
update_job_comp(self.session, "abc42", "analysis_server_1")
job = get_next_from_queue(self.session)
assert job.job_id == "abc43"
def test_submit_job_to_comp(self):
add_job(self.session, "abc42", "analysis job", "{}", "session_id", "username")
update_job_comp(self.session, "abc42", "analysis_server_1")
jobs = [x for x in get_jobs(self.session)]
assert len(jobs) == 1
job = jobs[0]
assert job.job_id == 'abc42'
assert job.comp_id == 'analysis_server_1'
def test_get_all_jobs(self):
add_job(self.session, "abc42", "analysis job", "{}", "session_id", "username")
add_job(self.session, "abc43", "analysis job", "{}", "session_id", "username")
update_job_comp(self.session, "abc43", "analysis_server_1")
update_job_results(self.session, "abc43", "results", 'COMPLETED')
jobs_active = [x for x in get_jobs(self.session)]
jobs_all = [x for x in get_jobs(self.session, include_finished=True)]
assert len(jobs_active) == 1
assert len(jobs_all) == 2
def test_get_nonexisting_job(self):
with pytest.raises(JobNotFound):
get_job(self.session, "abc")
def test_submit_nonexisting_job_to_as(self):
with pytest.raises(JobNotFound):
update_job_comp(self.session, "abc42", "analysis_server_1")
def test_add_replyto_to_nonexisting_job(self):
with pytest.raises(JobNotFound):
update_job_reply_to(self.session, "abc42", "someaddr")
def test_job_update(self):
add_job(self.session, "abc42", "analysis job", "{}", "session_id", "username")
assert not get_job(self.session, "abc42").seen
update_job_running(self.session, "abc42")
assert get_job(self.session, "abc42")
def test_job_update_nonexistent(self):
with pytest.raises(JobNotFound):
update_job_running(self.session, "abc42")
def test_job_presentation(self):
add_job(self.session, "abc42", "analysis job", "{}", "session_id", "username")
job_str = get_job(self.session, "abc42")
assert "%s" % job_str == "<Job:abc42>"
def test_reschedule_nonexisting_job(self):
with pytest.raises(JobNotFound):
update_job_rescheduled(self.session, "abc42")
def test_reschedule_finished_job(self):
add_job(self.session, "abc42", "analysis job", "{}", "session_id", "username")
update_job_results(self.session, "abc42", "results", "COMPLETED")
with pytest.raises(RuntimeError):
update_job_rescheduled(self.session, "abc42")
def test_reschedule_job(self):
add_job(self.session, "abc42", "analysis job", "{}", "session_id", "username")
job = get_job(self.session, "abc42")
assert job.retries == 0
update_job_rescheduled(self.session, "abc42")
assert job.retries == 1
def test_cancel_job(self):
add_job(self.session, "abc42", "analysis job", "{}", "session_id", "username")
update_job_cancelled(self.session, "abc42")
job = get_job(self.session, "abc42")
assert job.finished
assert not job.results
def test_cancel_nonexistent(self):
with pytest.raises(JobNotFound):
update_job_cancelled(self.session, "abc42")
def test_cancel_completed(self):
add_job(self.session, "abc42", "analysis job", "{}", "session_id", "username")
update_job_results(self.session, "abc42", "results", "CANCELLED")
with pytest.raises(RuntimeError):
update_job_cancelled(self.session, "abc42")
def test_purge_completed(self):
add_job(self.session, "abc42", "analysis job", "{}", "session_id", "username")
jobs = [x for x in get_jobs(self.session)]
assert len(jobs) == 1
job = get_job(self.session, "abc42")
job.created = datetime.datetime.utcnow() - datetime.timedelta(10000)
update_job_results(self.session, "abc42", "results", "COMPLETED")
job.finished = datetime.datetime.utcnow() - datetime.timedelta(10000)
self.session.merge(job)
purge_completed_jobs(self.session)
jobs = [x for x in get_jobs(self.session)]
assert len(jobs) == 0
def test_dict_representation(self):
add_job(
self.session,
"daeadeb7-31c0-4279-a784-79bb5e35f73c",
('{"map":{"entry":[{"string":["analysisID","SortGtf.java"]},'
'{"string":["payload_unsorted.gtf","adf2c6af-fdf2-44e5-b4ad-0c3602653228"]},'
'{"string":["jobID","daeadeb7-31c0-4279-a784-79bb5e35f73c"]}]}}'),
('{"username": "test", "timestamp": "1417607038606", "expires":'
'"0", "reply-to": "/topic/foo", "class": '
'"fi.csc.microarray.messaging.message.JobMessage", "session-id":'
'"session_id", "destination": "/topic/bar", "persistent": "true",'
'"priority": "4", "multiplex-channel": "null", "message-id":'
'"8a96be12f61641b998fd57939e38bf98", "transformation":'
'"jms-map-json"}'),
"session_id", "username")
update_job_results(
self.session,
'daeadeb7-31c0-4279-a784-79bb5e35f73c',
('{"map":{"entry":[{"string":"errorMessage","null":""},'
'{"string":["sourceCode","chipster"]},{"string":["outputText","x"]},'
'{"string":["jobId","daeadeb7-31c0-4279-a784-79bb5e35f73c"]},'
'{"string":"heartbeat","boolean":false},'
'{"string":["payload_data-input-test.txt","b80b9f0a-9195-4447-a30d-403762d065ac"]},'
'{"string":["stateDetail",""]},{"string":["exitState","COMPLETED"]}]}}'),
'COMPLETED')
job = get_job(self.session, 'daeadeb7-31c0-4279-a784-79bb5e35f73c')
d = job.to_dict()
assert d['analysis_id'] == u'SortGtf.java'
assert d['job_id'] == 'daeadeb7-31c0-4279-a784-79bb5e35f73c'
| mit | 6,564,160,316,904,330,000 | 44.849462 | 97 | 0.591581 | false |
scott-maddox/openbandparams | src/openbandparams/examples/advanced/GaInAsSb_on_GaSb/Plot_Strained_Band_Offset_vs_Composition_of_Quaternary3.py | 1 | 2475 | #
# Copyright (c) 2013-2014, Scott J Maddox
#
# This file is part of openbandparams.
#
# openbandparams is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# openbandparams is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with openbandparams. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
# Make sure we import the local openbandparams version
import os
import sys
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../..')))
from openbandparams import *
import matplotlib.pyplot as plt
import numpy
# Type 3 Quaternary
alloy = GaInAsSb
# calculate the data
T = 300 # K
N = 100
xs = numpy.linspace(0, 1, N)
ys = numpy.linspace(0, 1, N)
X, Y = numpy.meshgrid(xs, ys)
Z = numpy.empty(shape=(N, N), dtype=numpy.double)
W = numpy.empty(shape=(N, N), dtype=numpy.double)
for i in xrange(N):
for j in xrange(N):
strained = alloy(x=X[i, j], y=Y[i, j]).strained_001(GaSb)
strain = strained.strain_out_of_plane(T=T)
if not (0. <= strain <= 0.03):
Z[i, j] = numpy.nan
W[i, j] = numpy.nan
else:
Z[i, j] = strained.VBO_hh(T=T) - GaSb.VBO()
W[i, j] = GaSb.CBO() - strained.CBO(T=T)
# plot it
fig = plt.figure()
CS = plt.contour(1-X, 1-Y, Z, 14, colors='r')
plt.clabel(CS, inline=True, fontsize=10)
CS2 = plt.contour(1-X, 1-Y, W, 12, colors='b')
plt.clabel(CS2, inline=True, fontsize=10)
plt.title('$%s/GaSb$ from 0 to 3%% strain (T = %.0f K)' % (alloy.latex(), T))
plt.xlabel('%s fraction' % alloy.elements[1])
plt.ylabel('%s fraction' % alloy.elements[3])
plt.plot([numpy.nan], [numpy.nan], 'b-', label='Conduction Band Offset')
plt.plot([numpy.nan], [numpy.nan], 'r-', label='Valance Band Offset')
plt.legend(loc='lower left')
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
output_filename = sys.argv[1]
plt.savefig(output_filename)
else:
plt.show() | agpl-3.0 | 8,786,756,771,074,003,000 | 33.873239 | 77 | 0.628283 | false |
gbiggs/rtcshell | rtcshell/rtls.py | 1 | 13317 | #!/usr/bin/env python
# -*- Python -*-
# -*- coding: utf-8 -*-
'''rtcshell
Copyright (C) 2009-2010
Geoffrey Biggs
RT-Synthesis Research Group
Intelligent Systems Research Institute,
National Institute of Advanced Industrial Science and Technology (AIST),
Japan
All rights reserved.
Licensed under the Eclipse Public License -v 1.0 (EPL)
http://www.opensource.org/licenses/eclipse-1.0.txt
File: rtls.py
Implementation of the command to list naming contexts.
'''
# $Source$
from optparse import OptionParser, OptionError
import os
from rtctree.exceptions import RtcTreeError
from rtctree.tree import create_rtctree, InvalidServiceError, \
FailedToNarrowRootNamingError, \
NonRootPathError
from rtctree.path import parse_path
from rtctree.utils import build_attr_string, colour_supported, \
get_num_columns_and_rows, get_terminal_size
import sys
from rtcshell import RTSH_PATH_USAGE, RTSH_VERSION
from rtcshell.path import cmd_path_to_full_path
def get_node_long_lines(nodes, use_colour=True):
info_strings = []
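    # Each entry built below is a tuple of (text, padding) pairs -- padding being the
    # extra width added by invisible colour escape codes -- with the node name last.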
state_width = 0
total_width = 0
in_width = 0
out_width = 0
svc_width = 0
for node in nodes:
if node.is_directory:
if state_width == 0:
state_width = 1
if total_width == 0:
total_width = 1
if in_width == 0:
in_width = 1
if out_width == 0:
out_width = 1
if svc_width == 0:
svc_width = 1
name = build_attr_string(['bold', 'blue'],
supported=use_colour) + \
node.name + build_attr_string(['reset'],
supported=use_colour)
info_strings.append((('-', 0), ('-', 0), ('-', 0),
('-', 0), ('-', 0), name))
elif node.is_manager:
# Managers are not handled yet
if state_width == 0:
state_width = 1
if total_width == 0:
total_width = 1
if in_width == 0:
in_width = 1
if out_width == 0:
out_width = 1
if svc_width == 0:
svc_width = 1
name = build_attr_string(['bold', 'green'],
supported=use_colour) + \
node.name + build_attr_string(['reset'],
supported=use_colour)
info_strings.append((('-', 0), ('-', 0), ('-', 0),
('-', 0), ('-', 0), name))
elif node.is_component:
state = node.state
state_string = node.plain_state_string
if len(state_string) > state_width:
state_width = len(state_string)
state_string = (node.get_state_string(add_colour=use_colour),
len(node.get_state_string(add_colour=use_colour)) - \
len(state_string))
num_ports = len(node.ports)
num_connected = len(node.connected_ports)
total_string = '{0}/{1}'.format(num_ports, num_connected)
if len(total_string) > total_width:
total_width = len(total_string)
coloured_string = build_attr_string('bold',
supported=use_colour) + \
str(num_ports) + \
build_attr_string('reset',
supported=use_colour) + '/' + \
str(num_connected)
total_string = (coloured_string, len(coloured_string) - \
len(total_string))
num_ports = len(node.inports)
num_connected = len(node.connected_inports)
in_string = '{0}/{1}'.format(num_ports, num_connected)
if len(in_string) > in_width:
in_width = len(in_string)
coloured_string = build_attr_string('bold',
supported=use_colour) + \
str(num_ports) + \
build_attr_string('reset',
supported=use_colour) + '/' + \
str(num_connected)
in_string = (coloured_string, len(coloured_string) - \
len(in_string))
num_ports = len(node.outports)
num_connected = len(node.connected_outports)
out_string = '{0}/{1}'.format(num_ports, num_connected)
if len(out_string) > out_width:
out_width = len(out_string)
coloured_string = build_attr_string('bold',
supported=use_colour) + \
str(num_ports) + \
build_attr_string('reset',
supported=use_colour) + '/' + \
str(num_connected)
out_string = (coloured_string, len(coloured_string) - \
len(out_string))
num_ports = len(node.svcports)
num_connected = len(node.connected_svcports)
svc_string = '{0}/{1}'.format(num_ports, num_connected)
if len(svc_string) > svc_width:
svc_width = len(svc_string)
coloured_string = build_attr_string('bold',
supported=use_colour) + \
str(num_ports) + \
build_attr_string('reset',
supported=use_colour) + '/' + \
str(num_connected)
svc_string = (coloured_string, len(coloured_string) - \
len(svc_string))
info_strings.append((state_string, total_string, in_string,
out_string, svc_string, node.name))
else:
# Other types are unknowns
if state_width == 0:
state_width = 1
if total_width == 0:
total_width = 1
if in_width == 0:
in_width = 1
if out_width == 0:
out_width = 1
if svc_width == 0:
svc_width = 1
name = build_attr_string(['faint', 'white'],
supported=use_colour) + \
node.name + build_attr_string(['reset'],
supported=use_colour)
info_strings.append((('-', 0), ('-', 0), ('-', 0),
('-', 0), ('-', 0), name))
state_width += 2
total_width += 2
in_width += 2
out_width += 2
svc_width += 2
result = []
for string in info_strings:
result.append('{0}{1}{2}{3}{4}{5}'.format(
string[0][0].ljust(state_width + string[0][1]),
string[1][0].ljust(total_width + string[1][1]),
string[2][0].ljust(in_width + string[2][1]),
string[3][0].ljust(out_width + string[3][1]),
string[4][0].ljust(svc_width + string[4][1]),
string[5]))
return result
def format_items_list(items):
gap = ' '
term_rows, term_cols = get_terminal_size()
nrows, ncols, col_widths = get_num_columns_and_rows([len(ii[1]) \
for ii in items], len(gap), term_cols)
rows = [items[s:s + ncols] for s in range(0, len(items), ncols)]
lines = []
for r in rows:
new_line = ''
for ii, c in enumerate(r):
new_line += '{0:{1}}'.format(c[0], col_widths[ii] + \
(len(c[0]) - len(c[1]))) + gap
lines.append(new_line.rstrip())
return lines
def list_directory(dir_node, long=False):
listing = dir_node.children
use_colour = colour_supported(sys.stdout)
if long:
lines = get_node_long_lines(listing, use_colour=use_colour)
return lines
else:
items = []
for entry in listing:
if entry.is_directory:
items.append((build_attr_string(['bold', 'blue'],
supported=use_colour) + \
entry.name + '/' + \
build_attr_string(['reset'],
supported=use_colour),
entry.name))
elif entry.is_component:
items.append((entry.name, entry.name))
elif entry.is_manager:
items.append((build_attr_string(['bold', 'green'],
supported=use_colour) + \
entry.name + \
build_attr_string(['reset'],
supported=use_colour),
entry.name))
else:
items.append((build_attr_string(['faint', 'white'],
supported=use_colour) + \
entry.name + \
build_attr_string(['reset'],
supported=use_colour),
entry.name))
return format_items_list(items)
def list_target(cmd_path, full_path, options, tree=None):
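    # parse_path splits the full path into its name components plus an optional
    # trailing port name.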
path, port = parse_path(full_path)
if port:
# Can't list a port
print >>sys.stderr, '{0}: Cannot access {1}: No such directory or \
object.'.format(sys.argv[0], cmd_path)
return 1
trailing_slash = False
if not path[-1]:
# There was a trailing slash
trailing_slash = True
path = path[:-1]
if not tree:
tree = create_rtctree(paths=path)
if not tree:
return 1
if not tree.has_path(path):
print >>sys.stderr, '{0}: Cannot access {1}: No such directory or \
object.'.format(sys.argv[0], cmd_path)
return 1
if tree.is_component(path):
# Path points to a single component: print it like 'ls <file>'.
if trailing_slash:
# If there was a trailing slash, complain that a component is not a
# directory.
print >>sys.stderr, '{0}: cannot access {1}: Not a \
directory.'.format(sys.argv[0], cmd_path)
return 1
if options.long:
lines = get_node_long_lines([tree.get_node(path)],
sys.stdout.isatty())
for l in lines:
print l
else:
print path[-1]
elif tree.is_directory(path):
# If recursing, need to list this directory and all its children
if options.recurse:
recurse_root = tree.get_node(path)
recurse_root_path = recurse_root.full_path
def get_name(node, args):
if node.full_path.startswith(recurse_root_path):
result = node.full_path[len(recurse_root_path):]
else:
result = node.full_path
return result.lstrip('/')
dir_names = ['.'] + recurse_root.iterate(get_name,
args=options.long, filter=['is_directory'])[1:]
listings = recurse_root.iterate(list_directory,
args=options.long, filter=['is_directory'])
for dir, listing in zip(dir_names, listings):
if dir == '.':
print '.:'
else:
print './' + dir + ':'
for l in listing:
print l
print
else:
dir_node = tree.get_node(path)
lines = list_directory(dir_node, options.long)
for l in lines:
print l
else:
print >>sys.stderr, '{0}: cannot access {1}: Unknown object \
type.'.format(sys.argv[0], cmd_path)
return 1
return 0
def main(argv=None, tree=None):
usage = '''Usage: %prog [options] [path]
List a name server, directory, manager or component.
Equivalent to the POSIX 'ls' command.
The long display shows the following information in columns:
State
Total number of ports/Total connected
Input ports/Inputs connected
Output ports/Outputs connected
Service ports/Service connected
Name
''' + RTSH_PATH_USAGE
version = RTSH_VERSION
parser = OptionParser(usage=usage, version=version)
parser.add_option('-l', dest='long', action='store_true', default=False,
help='Use a long listing format.')
parser.add_option('-d', '--debug', dest='debug', action='store_true',
default=False, help='Print debugging information. \
[Default: %default]')
parser.add_option('-R', '--recurse', dest='recurse', action='store_true',
default=False, help='List recursively. [Default: %default]')
if argv:
sys.argv = [sys.argv[0]] + argv
try:
options, args = parser.parse_args()
except OptionError, e:
print 'OptionError:', e
return 1
if not args:
cmd_path = ''
elif len(args) == 1:
cmd_path = args[0]
else:
print >>sys.stderr, usage
return 1
full_path = cmd_path_to_full_path(cmd_path)
return list_target(cmd_path, full_path, options, tree)
# vim: tw=79
| epl-1.0 | 7,676,000,197,927,443,000 | 36.198324 | 79 | 0.490501 | false |
nodebox/nodebox-opengl | examples/07-filter/05-render.py | 1 | 2168 | # Add the upper directory (where the nodebox module is) to the search path.
import os, sys; sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics import *
# The render() command executes a function with drawing commands
# in an offscreen (i.e. hidden) canvas and returns an Image object.
# This is useful if you want to apply filters to text, ellipses, etc.
def hello():
fill(1, 0, 0, 0.5) # Transparent red.
ellipse(120, 120, 200, 200)
fill(0, 1, 0, 0.5) # Transparent green.
ellipse(170, 120, 200, 200)
fill(0, 0, 1, 0.5) # Transparent blue.
ellipse(145, 160, 200, 200)
fill(0)
font("Droid Serif")
text("hello", x=0, y=90, fontsize=70, width=300, align=CENTER)
# We call this a "procedural" image, because it is entirely created in code.
# Procedural images can be useful in many ways:
# - applying effects to text,
# - caching a complex composition that is not frequently updated (for speed),
# - creating on-the-fly textures or shapes that are different every time,
# - using NodeBox from the command line without opening an application window.
img = render(function=hello, width=300, height=300)
# Note that we make the width and height of the offscreen canvas
# a little bit larger than the actual composition.
# This creates a transparent border, so effects don't get cut off
# at the edge of the rendered image.
# Images can be saved to file, even without starting canvas.run().
# To try it out, uncomment the following line:
#img.save("hello.png")
def draw(canvas):
canvas.clear()
# Apply a blur filter to the procedural image and draw it.
image(blur(img, scale=canvas.mouse.relative_x), 20, 100)
# Compare to the same shapes drawn directly to the canvas.
# You may notice that the rendered image has jagged edges...
# For now, there is nothing to be done about that - a soft blur can help.
translate(300,100)
fill(1, 0, 0, 0.5)
ellipse(120, 120, 200, 200)
fill(0, 1, 0, 0.5)
ellipse(170, 120, 200, 200)
fill(0, 0, 1, 0.5)
ellipse(145, 160, 200, 200)
# Start the application:
canvas.fps = 60
canvas.size = 600, 500
canvas.run(draw)
| bsd-3-clause | 8,009,919,124,711,394,000 | 36.37931 | 78 | 0.686808 | false |
ace02000/pyload | module/plugins/accounts/SimplydebridCom.py | 1 | 1696 | # -*- coding: utf-8 -*-
import time
from module.plugins.internal.Account import Account
class SimplydebridCom(Account):
__name__ = "SimplydebridCom"
__type__ = "account"
__version__ = "0.15"
__status__ = "testing"
__config__ = [("mh_mode" , "all;listed;unlisted", "Filter hosters to use" , "all"),
("mh_list" , "str" , "Hoster list (comma separated)", "" ),
("mh_interval", "int" , "Reload interval in minutes" , 60 )]
__description__ = """Simply-Debrid.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("Kagenoshin", "[email protected]")]
def grab_hosters(self, user, password, data):
html = self.load("http://simply-debrid.com/api.php", get={'list': 1})
return [x.strip() for x in html.rstrip(';').replace("\"", "").split(";")]
def grab_info(self, user, password, data):
res = self.load("http://simply-debrid.com/api.php",
get={'login': 2,
'u' : user,
'p' : password})
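        # Assumed response format: "<status>;<unused>;<valid-until dd/mm/YYYY>[;...]"
        # (only fields 0 and 2 are used below)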
data = [x.strip() for x in res.split(";")]
if str(data[0]) != "1":
return {'premium': False}
else:
return {'trafficleft': -1, 'validuntil': time.mktime(time.strptime(str(data[2]), "%d/%m/%Y"))}
def signin(self, user, password, data):
res = self.load("https://simply-debrid.com/api.php",
get={'login': 1,
'u' : user,
'p' : password})
if res != "02: loggin success":
self.fail_login()
| gpl-3.0 | 2,483,385,987,593,713,700 | 35.869565 | 106 | 0.470519 | false |
aliciawyy/CompInvest | load/load_data.py | 1 | 1779 | """
This is the general interface to load data, either we want to
load the data from internet through panda or load local data through
QSTK for tests.
@author Alice Wang
"""
import pandas.io.data as web
from load_local_data import load_local_data_from_yahoo
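# Example (illustrative):
#   prices = load_stock_close_price('2012-01-01', '2012-12-31', ['AAPL', 'GOOG'], source='yahoo')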
def load_stock_close_price(start_date, end_date, ls_symbols, source='yahoo'):
"""
    @param start_date: start date of loading
    @param end_date: end date of loading
    @param ls_symbols: list of stock symbols
    @param source: data source, one of 'google', 'yahoo' or 'local'
@return: The close prices of given symbols
"""
if source == 'google':
close_key = 'Close'
elif source == 'yahoo':
close_key = 'Adj Close'
elif source == 'local':
close_key = 'close'
all_stock_data = load_all_stock_data(start_date, end_date, ls_symbols, source)
stock_close_prices = all_stock_data[close_key]
stock_close_prices = stock_close_prices.fillna(method='ffill')
stock_close_prices = stock_close_prices.fillna(method='bfill')
stock_close_prices = stock_close_prices.fillna(1.0)
return stock_close_prices
def load_all_stock_data(start_date, end_date, ls_symbols, source='yahoo'):
"""
@param start_date: start date of loading
@param end_date: end date of loading
@param ls_symbols: list of symbols
@param source: source, to load from 'google', 'yahoo' or 'local'
"""
acceptable_sources = frozenset(['google', 'yahoo', 'local'])
if source not in acceptable_sources:
raise ValueError('The given source %s is not in acceptable sources %s' % (
source, str(acceptable_sources)))
if source == 'local':
all_stock_data = load_local_data_from_yahoo(start_date, end_date, ls_symbols)
else:
all_stock_data = web.DataReader(ls_symbols, source, start=start_date, end=end_date)
return all_stock_data
| mit | 1,172,195,368,799,243,300 | 32.566038 | 91 | 0.66498 | false |
edisonlz/fruit | web_project/base/site-packages/redis_model/models/dattributes.py | 1 | 28317 | # -*- coding: UTF-8 -*-
import os,sys
import datetime
import time
from redis_client import RedisClient
import types
import logging
# load settings
import setting
from setting import logger
try:
from functools import wraps, update_wrapper
except ImportError:
from django.utils.functional import wraps, update_wrapper # Python 2.3, 2.4 fallback.
##########################Util Lib#############################
def queryset_to_dict(qs, key='pk'):
"""
Given a queryset will transform it into a dictionary based on ``key``.
param:
qs:queryset
key:string default is 'pk'
return:
dict
"""
return dict((getattr(u, key), u) for u in qs)
def distinct(l):
"""
Given an iterable will return a list of all distinct values.
param:
l:an iterable
return:
the list
"""
return list(set(l))
def attach_OneToOne(objects, model, field):
"""
Shortcut method which handles a pythonic LEFT OUTER JOIN.
    ``attach_OneToOne(posts, UserInfo, 'userinfo')``
param:
objects:object of list
model: object
field:string
"""
try:
qs = model.objects.filter(pk__in=distinct(getattr(o, "pk") for o in objects))
queryset = queryset_to_dict(qs)
for o in objects:
setattr(o, '_%s_cache' % (field), queryset.get(getattr(o, "pk")))
#print getattr(o, '_%s_cache' % (field))
#print o.userinfo
except Exception,e:
print e
def attach_foreignkey(objects, field, qs=None):
"""
Shortcut method which handles a pythonic LEFT OUTER JOIN.
``attach_foreignkey(posts, Post.thread)``
param:
objects:object of list
field:string
qs:query set default is None
"""
try:
t1 = time.time()
field = field.field
if qs is None:
qs = field.rel.to.objects.all()
qs = qs.filter(pk__in=distinct(getattr(o, field.column) for o in objects))
#if select_related:
# qs = qs.select_related(*select_related)
queryset = queryset_to_dict(qs)
if queryset:
for o in objects:
setattr(o, '_%s_cache' % (field.name), queryset.get(getattr(o, field.column)))
#print "attach_foreignkey use %s s " % (time.time() - t1)
except Exception,e:
print e
##########################Util Lib#############################
def find_include(ref_klass,pks,kwargs):
"""
search the related object from current object
param;
ref_klass:related classs
pks:primary key
**kwargs:
order_by_score: True or False
include_select_related_model:True or False
include:True or False
select_related:True or False
"""
if not pks:
return []
order_by_score = kwargs.get("order_by_score",False)
include_select_related_model = kwargs.get("include_select_related_model")
    # select_related() is enabled by default
model = kwargs.get("include")
if model:
        # 1. filter objects
#print "model_name %s:" % model.__name__
#print ref_klass.__name__,ref_klass.objects
#mobjs = ref_klass.objects.filter(id__in=pks).order_by('-pk')
n = datetime.datetime.now()
if order_by_score:
ids = ",".join(pks)
if ref_klass.__name__.lower() != "user":
sql = "SELECT * FROM %s where id in (%s) and status in(0,1) order by FIELD(id, %s)" % (ref_klass._meta.db_table,ids,ids)
else:
sql = "SELECT * FROM %s where id in (%s) order by FIELD(id, %s)" % (ref_klass._meta.db_table,ids,ids)
mobjs = ref_klass.objects.raw(sql)
else:
mobjs = ref_klass.objects.filter(id__in=pks)
logging.debug(" %s include use: %s" % (ref_klass,datetime.datetime.now() - n))
n = datetime.datetime.now()
        # 2. filter related objects
relate_ids = set()
for obj in mobjs:
v = getattr(obj,"%s_id" % model.__name__.lower())
if v:
relate_ids.add(v)
#print "relate_ids %s:" % relate_ids
        # 3.0 collect the related object ids
if relate_ids:
robjs = model.objects.filter(id__in=tuple(relate_ids))
#print "relate_ids len %s:" % len(robjs)
rdic = {}
for r in robjs:
rdic[r.id] = r
if include_select_related_model:
#print "deal userinfo"
attach_OneToOne(robjs,include_select_related_model,include_select_related_model.__name__.lower())
        # 3. set the related objects
for obj in mobjs:
setattr(obj,model.__name__.lower(),rdic.get(getattr(obj,"%s_id" % model.__name__.lower())))
logging.debug(" %s relate add use: %s" % (ref_klass,datetime.datetime.now() - n))
        # 4. return the related objects
return mobjs
elif kwargs.get("select_related",False):
return ref_klass.objects.select_related(depth=1).filter(id__in=pks)
else:
if order_by_score:
ids = ",".join(pks)
if ref_klass.__name__.lower() != "user":
sql = "SELECT * FROM %s where id in (%s) and status in (0,1) order by FIELD(id, %s)" % (ref_klass._meta.db_table,ids,ids)
else:
sql = "SELECT * FROM %s where id in (%s) order by FIELD(id, %s)" % (ref_klass._meta.db_table,ids,ids)
data = []
for d in ref_klass.objects.raw(sql):
data.append(d)
return data
else:
data = ref_klass.objects.filter(id__in=pks)
return data
class DAttribute(object):
def __init__(self):
"""
        Initialize the base object, reference and description
"""
#Base Object
self.bo = None
self.ref = None
self.descrpition = ""
def change_log(self,oper,obj_id,baseobj,pipe=None,score=None):
"""
save the relation of Reference
list|sortset:insert:user_posts:user_id:post_id
list|sortset:delete:user_posts:user_id:post_id
param:
oper: the operation type is string
obj_id: id of object type is integer
baseobj: base object
pipe: redis pipe default is None
score: use rank
"""
        # skip when data synchronisation is disabled
if not setting.DATA_SYNC:
return
        # use the supplied pipe or fall back to the default redis connection
dp = pipe or RedisClient.getInstance().redis
        # save the change log
        # format: operation :_: <base type>_<ref type>s :_: <base id> :_: <ref id> [:_: score]
basetype = str(baseobj.__class__.__name__).lower()
ref = self.ref.lower()
if basetype == ref:
ref = self.name.lower()
if oper.startswith("sortset"):
val = "%(oper)s:_:%(model_type)s_%(relate_type)ss:_:%(id)s:_:%(rid)s:_:%(score)s" % {"oper":oper,"model_type": basetype,"relate_type": ref,"id":baseobj.id,"rid" : obj_id ,"score":score}
else:
val = "%(oper)s:_:%(model_type)s_%(relate_type)ss:_:%(id)s:_:%(rid)s" % {"oper":oper,"model_type": basetype,"relate_type": ref,"id":baseobj.id,"rid" : obj_id}
logger.info("sync: " + val)
        # push the record onto the "change_log" redis list queue
dp.lpush("change_log",val)
@property
def ref_klass(self):
"""
        Resolve the referenced model class named by self.ref
        return:
            the model class, or None if no match is found
"""
from django.db import models
if self.ref:
_known_models = {}
for klass in models.Model.__subclasses__():
if hasattr(klass,"objects"):
_known_models[klass.__name__] = klass
for sub in klass.__subclasses__():
if hasattr(sub,"objects"):
_known_models[sub.__name__] = sub
for ssub in sub .__subclasses__():
if hasattr(ssub,"objects"):
_known_models[ssub.__name__] = ssub
return _known_models.get(self.ref, None)
"""
属性对象
"""
def set(self,instance,val):
"""
set the object's name value
param:
            instance: the object instance to set the value on
            val: the value to set
"""
setattr(instance,self.name,val)
def __set__(self,instance,val):
"""
set the object's name value
param:
            instance: the object instance to set the value on
            val: the value to set
"""
setattr(instance,"_"+self.name,val)
def acceptable_types(self):
"""
        get the python type accepted for this attribute's value
return:
string
"""
return basestring
def validate(self,instance):
"""
        validate that required data is present
param:
instance:object
"""
if self.required:
if not self:
instance._errors.append("%s require" % self)
########################################### Start Oper Decorater#######################################
def operKey(obj,field):
"""
operate Key
param:
obj:object
field:string
return:
string
"""
return "%s:id:%s:%s" % (obj.__class__.__name__,obj.id, field)
def operSet(fn):
"""
    decorator for write operations: resolves the redis key from the base object
    and forwards the destination object's id to the wrapped method
"""
def _new(self, *args, **kws):
try:
baseobj = args[0]
obj = args[1]
            # check that a destination object was supplied
if not obj:
logger.error("please input dest object")
raise StandardError("please input dest object")
if hasattr(obj,"id") or hasattr(obj,"_id"):
#key = "user:id:1:posts"
key = operKey(baseobj,self.name) #"%s:id:%s:%s" % (baseobj.__class__.__name__,baseobj.id, self.name)
kws["obj"] = obj
kws["baseobj"] = baseobj
fn(self,key,obj.id, **kws)
else:
logger.error("please object is new not have object.id")
raise StandardError("please object is new not have object.id")
except Exception,e:
logger.error(e)
return False
return True
    # wrap the function
return wraps(fn)(_new)
def operGet(fn):
"""
    decorator for read operations: builds the redis key from the object
    before calling the wrapped method
"""
def _new(self, *args, **kws):
try:
obj = args[0]
#print obj.id
if hasattr(obj,"id") or hasattr(obj,"_id"):
                # only the id is stored when the object has an id
#key = "user:id:1:posts"
key = operKey(obj,self.name) #"%s:id:%s:%s" % (obj.__class__.__name__,obj.id, self.name)
args = args[1:]
kws["obj"] = obj
return fn(self,key, *args, **kws)
else:
logger.error("please object is new not have object.id")
raise StandardError("please object is new not have object.id")
except Exception,e:
logger.error(e)
return None
    # wrap the function
return wraps(fn)(_new)
########################################### End Oper Decorater#######################################
class DListField(DAttribute):
def __init__(self,ref=None,required=False,name = None):
"""
initialize
param:
ref:object default is None
required:True or false default is False
name:string
"""
super(DAttribute,self).__init__()
self.ref = ref
self.index = False
self.required = required
self.name = name
""" 添加List 方法 """
@operSet
def lpush(self,key,value,**kwargs):
"""
LPUSH key value Append an element to the head of the List value at key
        param:
key:string
value:string
**kwargs: a dict
obj:object
baseobj:base object
return:
True or False
"""
#print "listfield lpush ",key,",",value
try:
if setting.Debug:
n = datetime.datetime.now()
pipe = RedisClient.getInstance().redis.pipeline()
pipe.lpush(key,value)
self.change_log("list:insert",kwargs["obj"].id,kwargs["baseobj"],pipe)
pipe.execute()
if setting.Debug:
logger.info(" lpush key: %s,use : %s" % (key,datetime.datetime.now() - n))
return True
except Exception,e:
pipe.reset()
logger.error(e)
return False
@operSet
def rpush(self,key,value,**kwargs):
"""
        push the data onto the tail (right end) of the redis list
        param:
key:string
value:string
**kwargs: a dict
obj:object
baseobj:base object
return:
True or False
"""
#Save
#print "listfield rpush ",key,",",value
try:
pipe = RedisClient.getInstance().redis.pipeline()
pipe.rpush(key,value)
self.change_log("list:insert",kwargs["obj"].id,kwargs["baseobj"],pipe)
pipe.execute()
return True
except Exception,e:
pipe.reset()
logger.error(e)
return False
@operGet
def lpop(self,key,**kwargs):
"""
LPOP key Return and remove (atomically) the first element of the List at key
        param:
key:string
**kwargs: a dict
obj:object
return:
object
"""
# LPOP key Return and remove (atomically) the first element of the List at key
#print "lpop key",key
pk = RedisClient.getInstance().redis.lpop(key)
self.change_log("list:delete",pk,kwargs["obj"])
objs = self.ref_klass.objects.filter(id=pk)
if objs:
return objs[0]
return None
@operGet
def rpop(self,key,**kwargs):
"""
RPOP key Return and remove (atomically) the first element of the List at key
        param:
key:string
**kwargs: a dict
obj:object
return:
object
"""
#print "rpop key",key
pk = RedisClient.getInstance().redis.rpop(key)
self.change_log("list:delete",pk,kwargs["obj"])
objs = self.ref_klass.objects.filter(id=pk)
if objs:
return objs[0]
return None
@operGet
def llen(self,key,**kwargs):
"""
LLEN key Return the length of the List value at key
        param:
key:string
**kwargs: a dict
return:
integer of length
"""
#print "len key",key
return RedisClient.getInstance().redis.llen(key)
@operGet
def lrange(self,key,start=0,end=10,**kwargs):
"""
LRANGE key start end Return a range of elements from the List at key
param:
key:string
start:integer default is 0
end:integer default is 10
**kwargs:dict
return:
the data in list
"""
if setting.Debug:
n = datetime.datetime.now()
pks = RedisClient.getInstance().redis.lrange(key,start,end)
if setting.Debug:
logger.info("lrange key: %s,start: %s, end: %s ,use : %s" % (key,start,end,datetime.datetime.now() - n))
        # return the related objects
return find_include(self.ref_klass,pks,kwargs)
#return self.ref_klass.objects.filter(pk__in = pks)
## @operGet
## def ltrim(self,key,start=0,end=10):
##  this is not supported by the sync mechanism
## # LTRIM key start end Trim the list at key to the specified range of elements
## return RedisClient.getInstance().redis.ltrim(key,start,end)
@operSet
def lrem(self,key,id,count=1,**kwargs):
"""
LREM key count value Remove the first-N, last-N, or all the elements matching value from the List at key
param:
key:string
count:integer default is 1
id:integer
**kwargs:dict
baseobj:base object
return:
True or False
"""
#print "rem key",key
#print "rem value",id
try:
pipe = RedisClient.getInstance().redis.pipeline()
pipe.lrem(key,id,count)
self.change_log("list:delete",id,kwargs["baseobj"])
pipe.execute()
return True
except Exception,e:
pipe.reset()
logger.error(e)
return False
@operGet
def delete(self,key,pipe,**kwargs):
"""
        delete the whole list stored at key
param:
key: string
pipe: redis pipe
return:
True or false
"""
        db = pipe or RedisClient.getInstance().redis
return db.delete(key)
class DSetField(DAttribute):
    # constants
redis = RedisClient.getInstance().redis
def __init__(self,ref=None,required=False,name=None):
"""
initialize reference object name index and required
param:
ref:reference object
required:True or False
name:string
"""
super(DAttribute,self).__init__()
self.ref = ref
self.name = name
self.index = False
self.required = required
@operSet
def sadd(self,key,member,**kwargs):
"""
SADD key member Add the specified member to the Set value at key
param:
key:string
member:string
**kwargs:include obj and baseobj
obj:the object
baseobj: base object
return:
True or False
"""
try:
if setting.Debug:
n = datetime.datetime.now()
pipe = DSetField.redis.pipeline()
pipe.sadd(key,member)
self.change_log("set:insert",kwargs["obj"].id,kwargs["baseobj"],pipe)
pipe.execute()
return True
except Exception,e:
pipe.reset()
logger.error(e)
return False
#RedisClient.getInstance().redis.sadd(key,member)
@operGet
def spop(self,key,**kwargs):
"""
SPOP key Remove and return (pop) a random element from the Set value at key
param:
key:string
**kwargs:include obj
obj:the object
return:
object
"""
# SPOP key Remove and return (pop) a random element from the Set value at key
pk = DSetField.redis.spop(key)
self.change_log("set:delete",pk,kwargs["obj"])
#print '#'*10
#print pk
#print self.ref_klass
#print '#'*10
objs = self.ref_klass.objects.filter(pk=pk)
if objs:
return objs[0]
return None
@operSet
def srem(self,key,member,**kwargs):
"""
SREM key member Remove the specified member from the Set value at key
param:
key:string
member:string
**kwargs:include baseobj
baseobj: base object
return:
True or False
"""
#SREM key member Remove the specified member from the Set value at key
try:
pipe = DSetField.redis.pipeline()
pipe.srem(key,member)
self.change_log("list:delete",member,kwargs["baseobj"])
pipe.execute()
return True
except Exception,e:
pipe.reset()
logger.error(e)
return False
#RedisClient.getInstance().redis.srem(key,member)
@operGet
def scard(self,key,**kwargs):
"""
SCARD key Return the number of elements (the cardinality) of the Set at key
param:
key:string
**kwargs:dict
return:
count of set by key
"""
return DSetField.redis.scard(key)
@operGet
def sismember(self,key,member_id,**kwargs):
# SISMEMBER key member Test if the specified value is a member of the Set at key
return DSetField.redis.sismember(key,member_id)
@operGet
def smembers(self,key,**kwargs):
"""
SMEMBERS key Return all the members of the Set value at key
param:
key:string
**kwargs:dict
return:
objects of list
"""
# SMEMBERS key Return all the members of the Set value at key
if setting.Debug:
n = datetime.datetime.now()
pks = DSetField.redis.smembers(key)
if kwargs.get("only_ids",False):
return pks
return self.ref_klass.objects.filter(pk__in = pks)
@operGet
def delete(self,key,pipe,**kwargs):
"""
delete the value of key
        param:
key:string
pipe:redis
**kwargs:dict
return:
True or False
"""
        db = pipe or DSetField.redis
return db.delete(key)
class DSortSetField(DAttribute):
    # constants
redis = RedisClient.getInstance().redis
def __init__(self,ref=None,required=False,index = False,name=None,limit=500):
"""
initialize name index reference object limit
param:
ref:reference object
required:True or False
index:True or False
name:string default is None
            limit:integer default is 500
"""
super(DAttribute,self).__init__()
self.name = name
self.index = index
self.ref = ref
self.required = required
        # cap the size of the sorted set; 0 means no limit
self.limit = limit
@operSet
def zadd(self,key ,member ,score,**kwargs):
"""
add the member into the sorted set by score
        if the member already exists its score is updated
param:
key:string
member:string
score:rank integer
**kwargs:include obj and baseobj
obj:object
baseobj:base object
return:
True or False
"""
try:
if setting.Debug:
n = datetime.datetime.now()
pipe = DSortSetField.redis.pipeline()
pipe.zadd(key ,member ,score)
self.change_log("sortset:insert",kwargs["obj"].id,kwargs["baseobj"],pipe,score)
pipe.execute()
            # Start: trim entries beyond the limit
if self.limit > 0:
zcard = DSortSetField.redis.zcard(key)
#print "zcard",zcard
if zcard > self.limit:
#print "* " * 20
#print "Start 删除超过LIMIT的"
#print "rem %s " % key
delete_to = zcard - self.limit
DSortSetField.redis.zremrangebyrank(key,0,delete_to)
#End
return True
except Exception,e:
pipe.reset()
logger.error(e)
return False
#return RedisClient.getInstance().redis.zadd(key ,member ,score)
@operGet
def zrank(self, key ,member_id,**kwargs):
"""
        get the rank (index) of the member in the sorted set,
        counting from the lowest score
param:
key:string
member_id:integer
**kwargs:dict
return:
integer
"""
return DSortSetField.redis.zrank( key , member_id)
@operGet
def zrevrank( self,key , member_id,**kwargs):
"""
        get the rank (index) of the member in the sorted set,
        counting from the highest score
param:
key:string
member_id:integer
**kwargs:dict
return:
integer
"""
return DSortSetField.redis.zrevrank( key ,member_id)
@operGet
def zrange(self, key , start=0, end=10,**kwargs):
"""
        get the members of the sorted set between start and end,
        lowest scores first
param:
key:string
start:integer
end:integer
**kwargs:dict
return:
members of list
"""
pks = DSortSetField.redis.zrange( key ,start, end) or []
if kwargs.get("only_ids",False):
return pks
else:
return find_include(self.ref_klass,pks,kwargs)
@operGet
def zrevrange(self, key ,start=0, end=10,**kwargs):
"""
        get the members of the sorted set between start and end,
        highest scores first
        param:
            key:string
            start:integer
            end:integer
            **kwargs:dict
        return:
            members of list
"""
if setting.Debug:
n = datetime.datetime.now()
withscores = kwargs.get("withscores",False)
#t = time.time()
data = DSortSetField.redis.zrevrange( key ,start, end,withscores = withscores) or []
#print "zrevrange use:" ,time.time() - t
        # scores are returned together with the members
if withscores:
pks = []
scores = {}
for d in data:
pks.append(d[0])
scores[d[0]] = d[1]
else:
pks = data
#print "withscores use:" ,time.time() - t
if kwargs.get("only_ids",False):
return pks
else:
mobjs = find_include(self.ref_klass,tuple(pks),kwargs)
#print "find_include use:" ,time.time() - t
            # attach the score to each object as the rd_score attribute
if withscores and mobjs:
m_raws = []
for obj in mobjs:
setattr(obj,"rd_score",scores[str(obj.pk)])
m_raws.append(obj)
mobjs = m_raws
return mobjs
@operGet
def zrangebyscore(self, key ,min, max,**kwargs):
"""
        get the members of the sorted set with scores between min and max
param:
key:string
min:integer
max:integer
**kwargs:dict
return:
members of list
"""
pks = DSortSetField.redis.zrangebyscore( key ,min, max) or []
return self.ref_klass.objects.filter(pk__in = pks)
@operGet
def zscore(self, key ,member,**kwargs):
"""
get the score of member
param:
key:string
member_id:integer
**kwargs:dict
return:
score
"""
return DSortSetField.redis.zscore( key ,member.id)
@operGet
def zcard(self, key,**kwargs ):
"""
        get the number of members in the sorted set
param:
key:string
**kwarg:dict
return:
count of list
"""
return DSortSetField.redis.zcard( key )
@operSet
def zrem(self, key,member_id,**kwargs):
"""
delete the member in sorted set
param:
key:string
member_id:integer
**kwargs:dict
return:
True or False
"""
try:
DSortSetField.redis.zrem( key,member_id)
return True
except Exception,e:
logger.error(e)
return False
@operGet
def zremrangebyrank(self,key,min_rank=0,max_rank=1,**kwargs):
"""
maintain the size of list
pop one object every time
param:
key:string
min_rank:integer default is 0
max_rank:integer default is 1
**kwargs:dict
        return:
True or False
"""
try:
DSortSetField.redis.zremrangebyrank(key,min_rank,max_rank)
return True
except Exception,e:
logger.error(e)
return False
| apache-2.0 | 6,926,544,624,200,830,000 | 29.264579 | 197 | 0.511115 | false |
NLeSC/eEcology-script-wrapper | script_wrapper/tests/tasks/test_classification.py | 1 | 2262 | # Copyright 2013 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import patch
from script_wrapper.validation import Invalid
from script_wrapper.tasks.classification import Classification
class TestClassification(unittest.TestCase):
def test_matlab_version(self):
task = Classification()
self.assertEqual(task.matlab_version, '2012a')
@patch('script_wrapper.tasks.classification.getAccelerationCount')
def test_formfields2taskargs(self, gac):
gac.return_value = 10000
task = Classification()
formquery = {'tracker_id': 1234,
'start': '2013-01-01T00:00:00',
'end': '2013-10-10T00:00:00',
'plot_accel': False,
}
taskargs = task.formfields2taskargs(formquery,
'postgresql://localhost')
etaskargs = {'db_url': 'postgresql://localhost',
'start': '2013-01-01T00:00:00',
'end': '2013-10-10T00:00:00',
'tracker_id': 1234,
'plot_accel': False,
}
self.assertEqual(taskargs, etaskargs)
@patch('script_wrapper.tasks.classification.getAccelerationCount')
def test_formfields2taskargs_invalid(self, gac):
gac.return_value = 10000000
task = Classification()
formquery = {'tracker_id': 1234,
'start': '2013-01-01T00:00:00',
'end': '2013-10-10T00:00:00',
'plot_accel': False,
}
with self.assertRaises(Invalid):
task.formfields2taskargs(formquery, 'postgresql://localhost')
| apache-2.0 | 2,386,582,424,575,353,000 | 35.483871 | 74 | 0.61008 | false |
Laurawly/tvm-1 | tests/python/driver/tvmc/test_frontends.py | 1 | 7125 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tarfile
import pytest
from tvm.ir.module import IRModule
from tvm.driver import tvmc
from tvm.driver.tvmc.common import TVMCException
def test_get_frontends_contains_only_strings():
sut = tvmc.frontends.get_frontend_names()
assert all([type(x) is str for x in sut]) is True
def test_get_frontend_by_name_valid():
# some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
pytest.importorskip("tensorflow")
sut = tvmc.frontends.get_frontend_by_name("keras")
assert type(sut) is tvmc.frontends.KerasFrontend
def test_get_frontend_by_name_invalid():
with pytest.raises(TVMCException):
tvmc.frontends.get_frontend_by_name("unsupported_thing")
def test_guess_frontend_tflite():
# some CI environments wont offer TFLite, so skip in case it is not present
pytest.importorskip("tflite")
sut = tvmc.frontends.guess_frontend("a_model.tflite")
assert type(sut) is tvmc.frontends.TFLiteFrontend
def test_guess_frontend_onnx():
# some CI environments wont offer onnx, so skip in case it is not present
pytest.importorskip("onnx")
sut = tvmc.frontends.guess_frontend("a_model.onnx")
assert type(sut) is tvmc.frontends.OnnxFrontend
def test_guess_frontend_pytorch():
# some CI environments wont offer pytorch, so skip in case it is not present
pytest.importorskip("torch")
sut = tvmc.frontends.guess_frontend("a_model.pth")
assert type(sut) is tvmc.frontends.PyTorchFrontend
def test_guess_frontend_keras():
# some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
pytest.importorskip("tensorflow")
sut = tvmc.frontends.guess_frontend("a_model.h5")
assert type(sut) is tvmc.frontends.KerasFrontend
def test_guess_frontend_tensorflow():
# some CI environments wont offer TensorFlow, so skip in case it is not present
pytest.importorskip("tensorflow")
sut = tvmc.frontends.guess_frontend("a_model.pb")
assert type(sut) is tvmc.frontends.TensorflowFrontend
def test_guess_frontend_invalid():
with pytest.raises(TVMCException):
tvmc.frontends.guess_frontend("not/a/file.txt")
def test_load_model__invalid_path__no_language():
# some CI environments wont offer TFLite, so skip in case it is not present
pytest.importorskip("tflite")
with pytest.raises(FileNotFoundError):
tvmc.frontends.load_model("not/a/file.tflite")
def test_load_model__invalid_path__with_language():
# some CI environments wont offer onnx, so skip in case it is not present
pytest.importorskip("onnx")
with pytest.raises(FileNotFoundError):
tvmc.frontends.load_model("not/a/file.txt", model_format="onnx")
def test_load_model__tflite(tflite_mobilenet_v1_1_quant):
# some CI environments wont offer TFLite, so skip in case it is not present
pytest.importorskip("tflite")
mod, params = tvmc.frontends.load_model(tflite_mobilenet_v1_1_quant)
assert type(mod) is IRModule
assert type(params) is dict
# check whether one known value is part of the params dict
assert "_param_1" in params.keys()
def test_load_model__keras(keras_resnet50):
# some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
pytest.importorskip("tensorflow")
mod, params = tvmc.frontends.load_model(keras_resnet50)
assert type(mod) is IRModule
assert type(params) is dict
## check whether one known value is part of the params dict
assert "_param_1" in params.keys()
def test_load_model__onnx(onnx_resnet50):
# some CI environments wont offer onnx, so skip in case it is not present
pytest.importorskip("onnx")
mod, params = tvmc.frontends.load_model(onnx_resnet50)
assert type(mod) is IRModule
assert type(params) is dict
    # check whether one known value is part of the params dict
assert "resnetv24_batchnorm0_gamma" in params.keys()
def test_load_model__pb(pb_mobilenet_v1_1_quant):
    # some CI environments won't offer TensorFlow, so skip in case it is not present
pytest.importorskip("tensorflow")
mod, params = tvmc.frontends.load_model(pb_mobilenet_v1_1_quant)
assert type(mod) is IRModule
assert type(params) is dict
# check whether one known value is part of the params dict
assert "MobilenetV1/Conv2d_0/weights" in params.keys()
def test_load_model___wrong_language__to_keras(tflite_mobilenet_v1_1_quant):
    # some CI environments won't offer TensorFlow/Keras, so skip in case it is not present
pytest.importorskip("tensorflow")
with pytest.raises(OSError):
tvmc.frontends.load_model(tflite_mobilenet_v1_1_quant, model_format="keras")
def test_load_model___wrong_language__to_tflite(keras_resnet50):
    # some CI environments won't offer TFLite, so skip in case it is not present
pytest.importorskip("tflite")
with pytest.raises(TVMCException):
tvmc.frontends.load_model(keras_resnet50, model_format="tflite")
def test_load_model___wrong_language__to_onnx(tflite_mobilenet_v1_1_quant):
    # some CI environments won't offer onnx, so skip in case it is not present
pytest.importorskip("onnx")
from google.protobuf.message import DecodeError
with pytest.raises(DecodeError):
tvmc.frontends.load_model(tflite_mobilenet_v1_1_quant, model_format="onnx")
@pytest.mark.skip(reason="https://github.com/apache/tvm/issues/7455")
def test_load_model__pth(pytorch_resnet18):
    # some CI environments won't offer torch, so skip in case it is not present
pytest.importorskip("torch")
pytest.importorskip("torchvision")
mod, params = tvmc.frontends.load_model(
pytorch_resnet18, shape_dict={"input": [1, 3, 224, 224]}
)
assert type(mod) is IRModule
assert type(params) is dict
# check whether one known value is part of the params dict
assert "layer1.0.conv1.weight" in params.keys()
def test_load_model___wrong_language__to_pytorch(tflite_mobilenet_v1_1_quant):
    # some CI environments won't offer pytorch, so skip in case it is not present
pytest.importorskip("torch")
with pytest.raises(RuntimeError) as e:
tvmc.frontends.load_model(
tflite_mobilenet_v1_1_quant,
model_format="pytorch",
shape_dict={"input": [1, 3, 224, 224]},
)
| apache-2.0 | 9,198,593,536,956,242,000 | 34.447761 | 89 | 0.723228 | false |
south-coast-science/scs_mfr | src/scs_mfr/test/gps_test.py | 1 | 1442 | """
Created on 18 May 2017
@author: Bruno Beloff ([email protected])
"""
import sys
from scs_core.position.nmea.gprmc import GPRMC
from scs_dfe.gps.pam_7q import PAM7Q
from scs_host.bus.i2c import I2C
from scs_host.sys.host import Host
from scs_mfr.test.test import Test
# --------------------------------------------------------------------------------------------------------------------
class GPSTest(Test):
"""
test script
"""
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, interface, verbose):
super().__init__(interface, verbose)
# ----------------------------------------------------------------------------------------------------------------
def conduct(self):
if self.verbose:
print("GPS...", file=sys.stderr)
gps = None
try:
I2C.Sensors.open()
# GPS...
gps = PAM7Q(self.interface, Host.gps_device())
gps.power_on()
gps.open()
# test...
self._datum = gps.report(GPRMC)
if self.verbose:
print(self._datum, file=sys.stderr)
# criterion...
return self._datum is not None
finally:
if gps:
gps.close()
gps.power_off()
I2C.Sensors.close()
| mit | 4,718,465,073,288,030,000 | 21.888889 | 118 | 0.398752 | false |
bfalacerda/strands_executive | task_executor/scripts/mdp_task_executor.py | 1 | 63607 | #!/usr/bin/env python
from __future__ import with_statement
import rospy
from Queue import Queue, Empty
from strands_executive_msgs.msg import Task, ExecutionStatus, DurationMatrix, DurationList, ExecutePolicyAction, ExecutePolicyFeedback, ExecutePolicyGoal, MdpStateVar, StringIntPair, StringTriple, MdpAction, MdpActionOutcome, MdpDomainSpec, TaskEvent
from strands_executive_msgs.srv import GetGuaranteesForCoSafeTask, GetGuaranteesForCoSafeTaskRequest, AddCoSafeTasks, DemandCoSafeTask, GetBlacklistedNodes
from task_executor.base_executor import BaseTaskExecutor
from threading import Thread, Condition
from task_executor.execution_schedule import ExecutionSchedule
from operator import attrgetter
from math import floor
import threading
import actionlib
from task_executor.SortedCollection import SortedCollection
from task_executor.utils import rostime_to_python, rostime_close, get_start_node_ids, ros_duration_to_string, ros_time_to_string, max_duration
from dateutil.tz import tzlocal
from copy import copy, deepcopy
from actionlib_msgs.msg import GoalStatus
from rosgraph_msgs.msg import Clock
ZERO = rospy.Duration(0)
class MDPTask(object):
"""
Class to store task and mdp related stuff together.
"""
def __init__(self, task, state_var, action, is_ltl = False, is_on_demand = False, is_interruptible = True):
self.task = task
self.state_var = state_var
self.action = action
self.is_ltl = is_ltl
self.is_mdp_spec = False
self.mdp_spec = None
self.is_on_demand = is_on_demand
self.is_interruptible = is_interruptible
def _set_mdp_spec(self, mdp_spec):
self.mdp_spec = mdp_spec
self.is_mdp_spec = True
self.is_ltl = False
class MDPTaskExecutor(BaseTaskExecutor):
"""
Executor which receives Task objects and converts them into MdpActions and manages their execution.
This distinguishes between three different types of tasks:
a) On demand tasks, which should be executed immediately.
b) Time-critical tasks, which should be executed as close to their start time as possible
c) Normal tasks, which should preferably (but not necessarily) be executed within their time window as possible
On demand tasks are added by the demand task service. The other types are added by the add task service. Time critical tasks are identified by having the same start and end time.
This executor respects task priorities and interruptibility in as far as tasks which declare themselves as uninterruptible will not be interrupted by a same or lower priority on-demand task, and no uninterruptible task will be cancelled due to a timeout.
The clear schedule service cancels all execution (regardless of uninterruptibility state) removes all tasks from the executor.
The executor publishes a schedule which is an ordering over tasks indicating the approximate order they will be considered for execution.
Normal tasks are sent to MDP execution in batches. These batches are limited to a configurable size (rosparam ~mdp_batch_size). On-demand and time critical tasks always have a batch size of one.
"""
def __init__(self):
# init node first, must be done before call to super init for service advertising to work
rospy.init_node("task_executor", log_level=rospy.INFO)
if rospy.get_param('use_sim_time', False):
rospy.loginfo('Using sim time, waiting for time update')
rospy.wait_for_message('/clock', Clock)
# init superclasses
super( MDPTaskExecutor, self ).__init__()
# collection of MDPTasks sorted by deadline
self.normal_tasks = SortedCollection(key=(lambda t: t.task.end_before))
self.time_critical_tasks = SortedCollection(key=(lambda t: t.task.execution_time))
# how late can tasks be expected to be before they're dropped at planning time
self.allowable_lateness = rospy.Duration(rospy.get_param("~allowable_lateness", 300))
self.state_lock = threading.Lock()
self.mdp_exec_client = None
self.set_active_batch([])
self.to_cancel = set()
# is a on-demand task active
self.on_demand_active = False
# only ever allow one batch in the execution queue. If this restriction is removed then demanding won't work immediately
self.mdp_exec_queue = Queue(maxsize = 1)
# Whether or not the normal tasks need to be checked
self.recheck_normal_tasks = False
# how much time should we try to fill with tasks. this is the default and will be extended if necessary
self.execution_window = rospy.Duration(1200)
# and the max number of tasks to fit into this window due to MDP scaling issues
self.batch_limit = 5
self.expected_completion_time = rospy.Time()
self.mdp_exec_thread = Thread(target=self.mdp_exec)
self.mdp_exec_thread.start()
# topic on which current schedule is broadcast
self.schedule_publisher = rospy.Publisher('current_schedule', ExecutionStatus, latch = True, queue_size = 1)
self.all_tasks_schedule_publisher = rospy.Publisher('task_executor/all_tasks', ExecutionStatus, latch = True, queue_size = 1)
self.update_schedule_condition = Condition()
self.schedule_publish_thread = Thread(target=self.publish_schedule)
self.schedule_publish_thread.start()
self.use_combined_sort_criteria = rospy.get_param('~combined_sort', False)
self.cancel_at_window_end = rospy.get_param('~close_windows', False)
if self.use_combined_sort_criteria:
rospy.loginfo('Using combined sort criteria')
else:
rospy.loginfo('Using separate sort criteria')
self.advertise_services()
self.tz = tzlocal()
def add_co_safe_tasks_ros_srv(self, req):
"""
Adds a task into the task execution framework.
"""
try:
self.service_lock.acquire()
now = rospy.get_rostime()
task_ids = []
tasks = []
task_spec_triples = []
for mdp_task in req.mdp_tasks:
task = Task()
task.task_id = self.get_next_id()
task_ids.append(task.task_id)
task.start_after = mdp_task.start_after
task.end_before = mdp_task.end_before
task.priority = mdp_task.priority
task.action = mdp_task.mdp_spec.ltl_task
if task.start_after.secs == 0:
rospy.logwarn('Task %s did not have start_after set' % (task.action))
task.start_after = now
if task.end_before.secs == 0:
rospy.logwarn('Task %s did not have end_before set, using start_after' % (task.action))
task.end_before = task.start_after
tasks.append(task)
task_spec_triples.append((task, mdp_task.mdp_spec, mdp_task.is_interruptible))
self.add_specs(task_spec_triples)
self.log_task_events(tasks, TaskEvent.ADDED, rospy.get_rostime())
return [task_ids]
finally:
self.service_lock.release()
add_co_safe_tasks_ros_srv.type=AddCoSafeTasks
def demand_co_safe_task_ros_srv(self, req):
"""
Demand a the task from the execution framework.
"""
try:
self.service_lock.acquire()
now = rospy.get_rostime()
if not self.are_active_tasks_interruptible():
return [False, 0, self.active_task_completes_by - now]
# A task needs to be created for internal monitoring
task = Task()
task.task_id = self.get_next_id()
task.start_after = req.start_after
task.end_before = req.end_before
task.action = req.domain_spec.ltl_task
# give the task some sensible defaults
if task.start_after.secs == 0:
rospy.loginfo('Demanded task %s did not have start_after set, using now' % (task.action))
task.start_after = now
if task.end_before.secs == 0:
rospy.loginfo('Demand task %s did not have end_before set, using start_after' % (task.action))
# make this appear as a time-critical task
task.end_before = now
task.execution_time = now
# stop anything else
if len(self.active_tasks) > 0:
self.pause_execution()
self.executing = False
self.cancel_active_task()
# and inform implementation to let it take action
self.spec_demanded(task, req.domain_spec)
if not self.executing:
self.executing = True
self.start_execution()
self.log_task_event(task, TaskEvent.DEMANDED, rospy.get_rostime())
return [True, task.task_id, rospy.Duration(0)]
finally:
self.service_lock.release()
demand_co_safe_task_ros_srv.type=DemandCoSafeTask
    def _extend_formula_with_exec_flag(self, formula, state_var_name):
insert_after = len(formula) - 1
for i in range(len(formula) - 1, 0, -1):
if formula[i] == ')':
insert_after = i
elif formula[i] == '(':
break
return formula[:insert_after] + ' & (X ' + state_var_name + '=1)' + formula[insert_after:]
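    # Example of the transformation above (illustrative values only):
    #   _extend_formula_with_exec_flag('(F "WayPoint1")', 'executed_n1_task')
    #   returns '(F "WayPoint1" & (X executed_n1_task=1))'
    # i.e. the execution flag is conjoined just inside the final closing bracket.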
def _create_travel_mdp_task(self, waypoint):
""" Creates an MDP task for just reacing these waypoints
"""
state_var = MdpStateVar()
action = MdpAction()
task = Task(action='(F "%s")' % waypoint)
return MDPTask(task, state_var, action, is_ltl = True)
def _convert_spec_to_mdp_action(self, task, mdp_spec, is_ltl = False, is_interruptible = True):
"""
Converts an already formed MdpDomainSpec into our internal representation that's now a bit redundant.
"""
mdp_task = MDPTask(task, None, None, is_ltl = is_ltl, is_interruptible = is_interruptible)
mdp_task._set_mdp_spec(mdp_spec)
return mdp_task
def _convert_task_to_mdp_action(self, task):
""" Converts a Task to a MdpAction.
returns a task, state var, action triple
"""
is_ltl = False
# if this is the case then we're passed an LTL formula
if ' ' in task.action:
# action_name = 'n'+ str(task.task_id) + '_ltl_task'
# state_var_name = 'executed_' + action_name
state_var = MdpStateVar()
outcome = MdpActionOutcome()
action = MdpAction()
            # task.action = self._extend_formula_with_exec_flag(task.action, state_var_name)
# state_var = MdpStateVar(name = state_var_name,
# init_val = 0, min_range = 0,
# max_range = 1)
# outcome = MdpActionOutcome(probability = 1.0,
# post_conds = [StringIntPair(string_data = state_var_name, int_data = 1)],
# duration_probs = [1.0],
# durations = [0])
# action = MdpAction(name=action_name,
# pre_conds=[StringIntPair(string_data=state_var_name, int_data=0)],
# outcomes=[outcome])
is_ltl = True
else:
action_name = 'n'+ str(task.task_id) + '_' + task.action + '_at_' + task.start_node_id.replace(' | ', '_or_')
# make sure there is nothing to make PRISM cry
action_name = action_name.replace('/','_')
state_var_name = 'executed_' + action_name
state_var = MdpStateVar(name = state_var_name,
init_val = 0, min_range = 0,
max_range = 1)
outcome=MdpActionOutcome(probability = 1.0,
post_conds = [StringIntPair(string_data = state_var_name, int_data = 1)],
duration_probs = [1.0],
durations = [task.expected_duration.to_sec()])
action = MdpAction(name=action_name,
action_server=task.action,
pre_conds=[StringIntPair(string_data=state_var_name, int_data=0)],
outcomes=[outcome])
if len(task.start_node_id) > 0:
for wp in get_start_node_ids(task):
action.waypoints.append(wp)
action.arguments = task.arguments
# print state_var
# print action
return MDPTask(task, state_var, action, is_ltl = is_ltl)
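    # Illustrative example of the conversion above (made-up task values): a task
    # with task_id=12, action='check_door' and start_node_id='WayPoint3' yields
    #   action.name    == 'n12_check_door_at_WayPoint3'
    #   state_var.name == 'executed_n12_check_door_at_WayPoint3'
    # and the action's single outcome sets that state variable to 1, taking the
    # task's expected duration.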
def add_tasks(self, tasks):
""" Called with new tasks for the executor """
with self.state_lock:
for task in tasks:
mdp_task = self._convert_task_to_mdp_action(task)
if task.start_after == task.end_before:
self.time_critical_tasks.insert(mdp_task)
else:
self.normal_tasks.insert(mdp_task)
self.republish_schedule()
self.recheck_normal_tasks = True
def add_specs(self, task_spec_triples):
""" Called with new mdp_specs for the executor """
with self.state_lock:
for task, mdp_spec, is_interruptible in task_spec_triples:
mdp_task = self._convert_spec_to_mdp_action(task, mdp_spec, is_interruptible = is_interruptible)
if task.start_after == task.end_before:
self.time_critical_tasks.insert(mdp_task)
else:
self.normal_tasks.insert(mdp_task)
self.republish_schedule()
self.recheck_normal_tasks = True
def spec_demanded(self, task, mdp_spec):
with self.state_lock:
prior_execution_state = self.executing
# this cleans up the current execution and sets self.executing to false
self.pause_execution()
# todo: potential race condition -- what happens if someone calls start/pause execution here
with self.state_lock:
# convert the demanded task into an mdp task for policy execution
demanded_mdp_task = self._convert_spec_to_mdp_action(task, mdp_spec)
demanded_mdp_task.is_on_demand = True
# and queue it up for execution
mdp_goal = self._mdp_single_task_to_goal(demanded_mdp_task)
# put blocks until the queue is empty, so we guarantee that the queue is empty while we're under lock
tasks = [demanded_mdp_task]
self.mdp_exec_queue.put((mdp_goal, tasks, self._get_guarantees_for_batch(tasks)[1]))
rospy.loginfo('Queued up demanded task: %s' % (demanded_mdp_task.task.action))
self.executing = prior_execution_state
def goal_status_to_task_status(self, goal_status):
if goal_status == GoalStatus.PREEMPTED:
return TaskEvent.TASK_PREEMPTED
elif goal_status == GoalStatus.SUCCEEDED:
return TaskEvent.TASK_SUCCEEDED
elif goal_status == GoalStatus.ACTIVE:
return TaskEvent.TASK_FAILED
else:
if goal_status != GoalStatus.ABORTED:
rospy.logwarn('Unknown conversion to TaskStatus for %s' % GoalStatus.to_string(goal_status))
return TaskEvent.TASK_FAILED
def mdp_exec_feedback(self, feedback):
"""
Called during execution with feedback from policy execution.
"""
with self.state_lock:
# print("Got Feedback: " + str(feedback))
rospy.loginfo('%s received feedback %s, %s' % (feedback.executed_action, GoalStatus.to_string(feedback.execution_status), feedback.expected_time.to_sec()))
self.expected_completion_time = self._expected_duration_to_completion_time(feedback.expected_time)
# if feedback.execution_status >= GoalStatus.PREEMPTED:
            # we don't need to check this status as we only receive this feedback in the terminal states of the mdp, so this action is done regardless
            # todo: PREEMPTED means the action started but was cancelled during the execution of the action server
            # todo: ACTIVE means the action started but was cancelled during the execution of the action server but didn't preempt
            # todo: should we allow these to be re-added to execution queue? currently preemption signals that a task has been permanently removed
# todo: if added back to normal tasks it will almost certainly be re-executed immediately as it's at the current location, causing a loop
now = rospy.get_rostime()
if feedback.executed_action != '' and self.remove_active_task(feedback.executed_action, self.goal_status_to_task_status(feedback.execution_status)):
# update the time critical tasks based on current location
self._update_time_critical_tasks(now)
self.republish_schedule()
def remove_active_task(self, action_name, task_status):
"""
Remove the indicated task from the active batch. This is based on the (valid) assumption that the action name uniquely identifies the task.
"""
for i in range(len(self.active_batch)):
mdp_task = self.active_batch[i]
if mdp_task.action is not None and mdp_task.action.name == action_name:
del self.active_batch[i]
del self.active_tasks[i]
log_string = 'Removing completed active task: %s. %s remaining in active batch' % (action_name, len(self.active_batch))
rospy.loginfo(log_string)
self.log_task_event(mdp_task.task, task_status, rospy.get_rostime(), description = log_string)
return True
# rospy.logwarn('Could not find %s in active batch' % action_name)
return False
def _check_for_late_normal_tasks(self, now):
"""
Removes any normal tasks which are too late to start execution
"""
dropped = False
while len(self.normal_tasks) > 0:
# look at the next normal task
next_normal_task = self.normal_tasks[0]
# drop the task if there's not enough time for expected duration to occur before the window closes
# this ignores the navigation time for this task, making task dropping more permissive than it should be. this is ok for now.
if now > (next_normal_task.task.end_before - next_normal_task.task.expected_duration):
log_string = 'Dropping queued normal task %s at %s as time window closed at %s ' % (next_normal_task.task.action, rostime_to_python(now), rostime_to_python(next_normal_task.task.end_before))
rospy.loginfo(log_string)
self.normal_tasks = SortedCollection(self.normal_tasks[1:], key=(lambda t: t.task.end_before))
self.log_task_event(next_normal_task.task, TaskEvent.DROPPED, now, description = log_string)
dropped = True
else:
break
return dropped
def _check_for_late_time_critical_tasks(self, now):
"""
Removes any time-critical tasks which are too late to start execution
"""
dropped = False
while len(self.time_critical_tasks) > 0:
next_time_critical_task = self.time_critical_tasks[0]
until_next_critical_task = next_time_critical_task.task.execution_time - now
if until_next_critical_task < (ZERO - self.allowable_lateness):
log_string = 'Dropping time-critical task %s as %s not enough time for execution' % (next_time_critical_task.action.name, until_next_critical_task.to_sec())
rospy.loginfo(log_string)
self.time_critical_tasks = SortedCollection(self.time_critical_tasks[1:], key=(lambda t: t.task.execution_time))
self.log_task_event(next_time_critical_task.task, TaskEvent.DROPPED, now, description = log_string)
dropped = True
else:
break
return dropped
def _get_blacklisted_nodes(self):
"""
Gets blacklisted nodes from service. If service does not exist, returns an empty list.
"""
try:
get_blacklisted_nodes = rospy.ServiceProxy('task_executor/get_blacklisted_nodes', GetBlacklistedNodes)
resp = get_blacklisted_nodes()
return resp.nodes
except rospy.ServiceException, e:
print "Service call failed: %s"%e
return []
def _mdp_single_task_to_goal(self, mdp_task):
mdp_spec = self._mdp_tasks_to_spec([mdp_task])
return ExecutePolicyGoal(spec = mdp_spec)
def _mdp_tasks_to_spec(self, mdp_tasks):
"""
Take a collection of MDPTask objects and produce an MdpDomainSpec from them.
"""
mdp_spec = MdpDomainSpec()
ltl_tasks = []
non_ltl_tasks = []
for mdp_task in mdp_tasks:
if mdp_task.is_ltl:
ltl_tasks.append(mdp_task)
elif mdp_task.is_mdp_spec:
ltl_tasks.append(mdp_task)
mdp_spec.vars.extend(mdp_task.mdp_spec.vars)
mdp_spec.actions.extend(mdp_task.mdp_spec.actions)
else:
non_ltl_tasks.append(mdp_task)
mdp_spec.vars.append(mdp_task.state_var)
mdp_spec.actions.append(mdp_task.action)
mdp_spec.ltl_task = ''
task_prefix = 'F '
# prevent the policy from visiting blacklisted nodes
# short-term fix is to have (!X U Y) & (!X U Z),
# but longer term is Bruno adding G !X so we can have global invariants
blacklist = self._get_blacklisted_nodes()
if len(blacklist) > 0:
task_prefix = '(!\"%s\"' % blacklist[0]
for bn in blacklist[1:]:
task_prefix += ' & !\"%s\"' % bn
task_prefix += ') U '
if len(non_ltl_tasks) > 0:
for mdp_task in non_ltl_tasks:
mdp_spec.ltl_task += '(%s %s=1) & ' % (task_prefix, mdp_task.state_var.name)
mdp_spec.ltl_task = mdp_spec.ltl_task[:-3]
# mdp_spec.ltl_task += '))'
if len(ltl_tasks) > 0:
mdp_spec.ltl_task += ' & '
if len(ltl_tasks) > 0:
for ltl_task in ltl_tasks:
if ltl_task.is_mdp_spec:
mdp_spec.ltl_task += ltl_task.mdp_spec.ltl_task
mdp_spec.ltl_task += ' & '
else:
mdp_spec.ltl_task += ltl_task.task.action
mdp_spec.ltl_task += ' & '
mdp_spec.ltl_task = mdp_spec.ltl_task[:-3]
return mdp_spec
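    # Illustrative examples of the spec built above (made-up variable names):
    # with two normal tasks and no blacklisted nodes the formula is
    #   '(F executed_a=1) & (F executed_b=1)'
    # while with a blacklisted node 'WayPoint7' the prefix becomes a constrained
    # until, giving
    #   '((!"WayPoint7") U executed_a=1) & ((!"WayPoint7") U executed_b=1)'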
def _drop_out_of_time_tasks(self, now):
"""
Drop any normal or time-critical task when their time windows have been exceeded.
"""
dropped = self._check_for_late_time_critical_tasks(now)
dropped = dropped or self._check_for_late_normal_tasks(now)
return dropped
def _get_guarantees_for_batch(self, task_batch, estimates_service = None, initial_waypoint = None, epoch = None):
if epoch is None:
epoch = rospy.get_rostime()
if initial_waypoint is None:
initial_waypoint = self.get_topological_node()
if estimates_service is None:
estimates_service = rospy.ServiceProxy('mdp_plan_exec/get_guarantees_for_co_safe_task', GetGuaranteesForCoSafeTask)
estimates_service.wait_for_service()
spec = self._mdp_tasks_to_spec(task_batch)
request = GetGuaranteesForCoSafeTaskRequest(spec = spec, initial_waypoint = initial_waypoint, epoch = epoch)
service_response = estimates_service(request)
return (spec, service_response)
def _choose_new_active_batch(self, task_check_limit, now, execution_window):
"""
Choose the tasks to execute next.
task_check_limit says how far along the normal task list to go to look at possible tasks
"""
# evaluated_at_least_one_task, new_active_batch, new_active_spec, new_active_guarantees = self._choose_new_active_batch()
mdp_estimates = rospy.ServiceProxy('mdp_plan_exec/get_guarantees_for_co_safe_task', GetGuaranteesForCoSafeTask)
mdp_estimates.wait_for_service()
last_successful_spec = None
possibles_with_guarantees_in_time = []
possibles_with_guarantees = []
        # now for each single task, get independent guarantees
for mdp_task in self.normal_tasks[:task_check_limit]:
try:
(mdp_spec, guarantees) = self._get_guarantees_for_batch([mdp_task], estimates_service = mdp_estimates, epoch = now)
                # only reason about combining tasks that have their windows open and are achievable on their own
#
nav_time = max_duration(guarantees.expected_time - mdp_task.task.max_duration, ZERO)
if False:
print 'timing details'
print ros_time_to_string(now)
print ros_time_to_string(mdp_task.task.start_after)
print ros_duration_to_string(guarantees.expected_time)
print ros_duration_to_string(mdp_task.task.max_duration)
print "Start by: %s" % ros_time_to_string(mdp_task.task.start_after - nav_time)
if now > (mdp_task.task.start_after - nav_time):
if guarantees.probability > 0 and guarantees.expected_time <= execution_window:
possibles_with_guarantees_in_time.append((mdp_task, mdp_spec, guarantees))
# keep all guarantees anyway, as we might need to report one if we can't find a task to execute
possibles_with_guarantees.append((mdp_task, mdp_spec, guarantees))
except Exception, e:
rospy.logwarn('Ignoring task due to: %s' % e)
self.normal_tasks.remove(mdp_task)
if self.use_combined_sort_criteria:
def task_reward(task_tuple):
# sanity check for zero-time case
if task_tuple[2].expected_time.secs > 0:
expected_time = task_tuple[2].expected_time.to_sec()
else:
expected_time = 1.0
# sanity check for zero priority case
if task_tuple[0].task.priority == 0:
rospy.logwarn('Priority is used for sorting but task %s had a priority of 0' % (task_tuple[0].task.action))
priority = 1.0
else:
priority = task_tuple[0].task.priority
return (priority*task_tuple[2].probability)/expected_time
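            # e.g. (illustrative numbers) a task with priority 2, probability 0.8 and an
            # expected time of 100s gets a reward of 2 * 0.8 / 100 = 0.016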
possibles_with_guarantees_in_time = sorted(possibles_with_guarantees_in_time, key=lambda x: task_reward(x), reverse=True)
for possible in possibles_with_guarantees_in_time:
rospy.loginfo('%s, with reward %.2f, will take %.2f secs with prio %s and prob %.4f ending before %s' % (possible[0].task.action, task_reward(possible), possible[2].expected_time.to_sec(), possible[0].task.priority, possible[2].probability, rostime_to_python(possible[0].task.end_before)))
else:
# sort the list of possibles by probability of success, with highest prob at start
            # sort is stable, so a sequence of sorts will work, starting with the lowest priority
possibles_with_guarantees_in_time = sorted(possibles_with_guarantees_in_time, key=lambda x: x[0].task.end_before)
possibles_with_guarantees_in_time = sorted(possibles_with_guarantees_in_time, key=lambda x: x[2].probability, reverse=True)
possibles_with_guarantees_in_time = sorted(possibles_with_guarantees_in_time, key=lambda x: x[0].task.priority, reverse=True)
for possible in possibles_with_guarantees_in_time:
rospy.loginfo('%s will take %.2f secs with prio %s and prob %.4f ending before %s' % (possible[0].task.action, possible[2].expected_time.to_sec(), possible[0].task.priority, possible[2].probability, rostime_to_python(possible[0].task.end_before)))
# if at least one task fits into the executable time window
if len(possibles_with_guarantees_in_time) > 0:
# keep the most probable
new_active_batch = [possibles_with_guarantees_in_time[0][0]]
last_successful_spec = (possibles_with_guarantees_in_time[0][1], possibles_with_guarantees_in_time[0][2])
# remove the most probable from the list of possibles
possibles_with_guarantees_in_time = possibles_with_guarantees_in_time[1:]
# limit the tasks inspected by the batch limit... we are skipping tasks, so just using the batch limit isn't enough
for possible in possibles_with_guarantees_in_time:
if len(new_active_batch) == self.batch_limit:
break
mdp_task = possible[0]
mdp_tasks_to_check = copy(new_active_batch)
mdp_tasks_to_check.append(mdp_task)
(mdp_spec, guarantees) = self._get_guarantees_for_batch(mdp_tasks_to_check, estimates_service = mdp_estimates, epoch = now)
if guarantees.expected_time > execution_window:
rospy.loginfo('Too long policy duration for %s: %s' % (mdp_spec.ltl_task, guarantees.expected_time.to_sec()))
else:
rospy.loginfo('Acceptable policy duration for %s: %s' % (mdp_spec.ltl_task, guarantees.expected_time.to_sec()))
last_successful_spec = (mdp_spec, guarantees)
new_active_batch.append(mdp_task)
return True, new_active_batch, last_successful_spec[0], last_successful_spec[1]
# if we get here then at least one task can be executed now, but doesn't fit into the execution window on its own
elif len(possibles_with_guarantees) > 0:
return True, [], possibles_with_guarantees[0][1], possibles_with_guarantees[0][2]
# if we get here either there are no tasks or none have passed start_after
else:
return False, [], None, None
def _update_time_critical_tasks(self, now):
"""
Update the execution time of each time critical task based on current location.
"""
# todo: we don't need to always check, only when location has changed... but let's optimise later
# how far in the future to update tasks
only_check_in_the_next = self.execution_window * 2
check_before = now + only_check_in_the_next
estimates_service = rospy.ServiceProxy('mdp_plan_exec/get_guarantees_for_co_safe_task', GetGuaranteesForCoSafeTask)
estimates_service.wait_for_service()
new_time_critical_tasks = SortedCollection(key=(lambda t: t.task.execution_time))
for mdp_task in self.time_critical_tasks:
try:
if mdp_task.task.execution_time.secs == 0 or mdp_task.task.start_after < check_before:
spec, guarantees = self._get_guarantees_for_batch([self._create_travel_mdp_task(mdp_task.task.start_node_id)], estimates_service = estimates_service, epoch = now)
# take the predicted time directly, alternative factor in the probability,
# see below.
expected_navigation_time = rospy.Duration(guarantees.expected_time.secs)
# prevents an underestimate due to this being the expected time to failure
# expected_navigation_time = rospy.Duration(guarantees.expected_time.secs / guarantees.probability)
rospy.loginfo('Expected navigation time for time-critical task: %s' % expected_navigation_time.secs)
mdp_task.task.execution_time = mdp_task.task.start_after - expected_navigation_time
new_time_critical_tasks.insert(mdp_task)
except Exception, e:
rospy.logwarn('Dropping time-critical task due to: %s' % e)
self.time_critical_tasks.remove(mdp_task)
self.log_task_event(mdp_task.task, TaskEvent.DROPPED, now, description = 'Error on guarantee call. Probably due to incorrect waypoint.')
self.republish_schedule()
self.time_critical_tasks = new_time_critical_tasks
# for mdp_task in self.time_critical_tasks:
# print mdp_task.action.name, 'at', rostime_to_python(mdp_task.task.execution_time), 'for', rostime_to_python(mdp_task.task.start_after)
def _should_start_next_time_critical_task(self, now):
if len(self.time_critical_tasks) > 0:
# if we're over the start time, it's good to go... lateness is handled in _check_for_late_time_critical_tasks
return now > self.time_critical_tasks[0].task.execution_time
else:
return False
def _next_execution_batch(self):
"""
Called when nothing is executing and another batch of tasks are required for execution.
"""
# todo: make the locking more fine-grained. Currently execution cannot be paused during this method, but the calls to the mdp services can take a long time
with self.state_lock:
now = rospy.get_rostime()
# todo: this ignores what happens when the robot is moving, so need to check during execution too.
self._update_time_critical_tasks(now)
if self._drop_out_of_time_tasks(now):
self.republish_schedule()
execution_window = self.execution_window
# now see how much time is available until the next time critical task
if len(self.time_critical_tasks) > 0:
next_time_critical_task = self.time_critical_tasks[0]
until_next_critical_task = next_time_critical_task.task.execution_time - now
rospy.loginfo('Time until next time-critical task: %.2f secs' % until_next_critical_task.to_sec())
if until_next_critical_task < execution_window:
execution_window = until_next_critical_task
# if we're close to a time critical task, then do that
if self._should_start_next_time_critical_task(now):
new_active_batch = [next_time_critical_task]
self.time_critical_tasks = SortedCollection(self.time_critical_tasks[1:], key=(lambda t: t.task.execution_time))
mdp_goal = self._mdp_single_task_to_goal(next_time_critical_task)
rospy.loginfo('Executing time-critical task: %s. Start time was %s for execution at %s. Time is now %s' % (mdp_goal.spec.ltl_task, rostime_to_python(next_time_critical_task.task.execution_time), rostime_to_python(next_time_critical_task.task.start_after), rostime_to_python(now)))
self.mdp_exec_queue.put((mdp_goal, new_active_batch, self._get_guarantees_for_batch(new_active_batch, epoch = now)[1]))
# else see what we can squeeze into available time
elif self.recheck_normal_tasks:
rospy.loginfo('Checking for normal tasks to fit into available time: %.2f secs' % execution_window.to_sec())
# create mdp task batch to fit into available time
#
# this checks expected time after adding each task to the batch
if len(self.normal_tasks) == 0:
rospy.loginfo('No normal tasks remaining')
self.recheck_normal_tasks = False
else:
task_check_limit = 2 * self.batch_limit
evaluated_at_least_one_task, new_active_batch, new_active_spec, new_active_guarantees = self._choose_new_active_batch(task_check_limit, now, execution_window)
# if we found tasks to fit into the time available
if len(new_active_batch) > 0:
new_normal_tasks = self.normal_tasks[task_check_limit:]
for mdp_task in self.normal_tasks[:task_check_limit]:
if mdp_task not in new_active_batch:
new_normal_tasks.append(mdp_task)
self.normal_tasks = SortedCollection(new_normal_tasks, key=(lambda t: t.task.end_before))
mdp_goal = ExecutePolicyGoal(spec = new_active_spec)
rospy.loginfo('Executing normal batch: %s' % mdp_goal.spec.ltl_task)
self.mdp_exec_queue.put((mdp_goal, new_active_batch, new_active_guarantees))
# if we couldn't fit a batch in, but there were normal tasks available
elif evaluated_at_least_one_task:
# if the first available task won't fit into the available execution time window, and this is the max possible, then increase the window size accordingly
if execution_window == self.execution_window and new_active_guarantees.expected_time > self.execution_window:
# for now just increase to the expected time of last tested policy
self.execution_window = new_active_guarantees.expected_time
                            rospy.loginfo('Extending default execution window to %s' % self.execution_window.to_sec())
# if we get here then we can't fit the first available task into the time before the first time-critical task
else:
# the basic thing here is not to recheck the normal tasks until after the next time-critical execution or until new normal tasks are added (which could be potentially earlier/shorter)
self.recheck_normal_tasks = False
# todo: we could also try some optimisation to fit in a task other than the first available normal one
else:
                        # if we get here we have normal tasks, but none of them were available for execution. this probably means
# that they're for the future
# we can't set recheck_normal_tasks to False as this is the only way the time is rechecked
rospy.loginfo('Next task available for execution in at most %.2f secs' % (self.normal_tasks[0].task.start_after - now).to_sec())
# pass
else:
rospy.logdebug('No need to recheck normal tasks')
def _expected_duration_to_completion_time(self, expected_duration):
"""
Take a guarantees struct and determine when the execution should complete by
"""
if expected_duration.secs < 0:
            rospy.logwarn('Expected duration was less than 0, giving a default of 5 minutes')
expected_duration = rospy.Duration(5 * 60)
expected_completion_time = rospy.get_rostime() + expected_duration + rospy.Duration(60)
if self.cancel_at_window_end:
for mdp_task in self.active_batch:
# only curtail tasks to window for non-time critical tasks
if mdp_task.task.start_after != mdp_task.task.end_before and mdp_task.task.end_before < expected_completion_time:
# rospy.logwarn('Curtailing execution with end of task window')
expected_completion_time = mdp_task.task.end_before
return expected_completion_time
def are_active_tasks_interruptible(self):
for mdp_task in self.active_batch:
if not mdp_task.is_interruptible:
return False
return super(MDPTaskExecutor, self).are_active_tasks_interruptible()
def _wait_for_policy_execution(self):
"""
Wait until policy execution is complete or until we reach expected_completion_time at which point policy execution is preempted.
"""
poll_time = rospy.Duration(5)
overtime = rospy.Duration(0)
# after an hour of overtime, give up
overtime_threshold = rospy.Duration(60 * 60)
log_count = 0
while not self.mdp_exec_client.wait_for_result(poll_time) and not rospy.is_shutdown():
# locking here as the feedback callback can change self.expected_completion_time
with self.state_lock:
now = rospy.get_rostime()
remaining_secs = (self.expected_completion_time - now).to_sec()
if remaining_secs < 0:
if self.are_active_tasks_interruptible():
rospy.logwarn('Policy execution did not complete in expected time, preempting')
self.mdp_exec_client.cancel_all_goals()
# give the policy execution some time to clean up
complete = self.mdp_exec_client.wait_for_result(rospy.Duration(70))
if not complete:
rospy.logwarn('Policy execution did not service preempt request in a reasonable time')
return GoalStatus.ACTIVE
else:
return GoalStatus.PREEMPTED
else:
rospy.logwarn('Policy execution did not complete in expected time, but is non-interruptible, so waiting. Overtime: %ss' % ros_duration_to_string(overtime))
overtime += poll_time
if overtime > overtime_threshold:
rospy.logwarn('Policy execution has exceeded overtime threshold all execution flags ignored, preempting regardless')
self.mdp_exec_client.cancel_all_goals()
# give the policy execution some time to clean up
complete = self.mdp_exec_client.wait_for_result(rospy.Duration(70))
if not complete:
rospy.logwarn('Policy execution did not service preempt request in a reasonable time')
return GoalStatus.ACTIVE
else:
return GoalStatus.RECALLED
else:
if log_count % 3 == 0:
rospy.loginfo('Another %.2f seconds until expected policy completion' % remaining_secs)
log_count += 1
with self.state_lock:
# check whether we're due to start a time-critical task that we'd otherwise miss
if self._should_start_next_time_critical_task(now):
if self.on_demand_active:
rospy.logwarn('Ignoring the start of a time-critical task due to an on-demand task')
else:
rospy.logwarn('We should be executing a time-critical task now, so cancelling execution')
self.mdp_exec_client.cancel_all_goals()
complete = self.mdp_exec_client.wait_for_result(rospy.Duration(70))
if not complete:
rospy.logwarn('Policy execution did not service preempt request in a reasonable time')
return GoalStatus.ACTIVE
else:
return GoalStatus.PREEMPTED
return self.mdp_exec_client.get_state()
def mdp_exec(self):
"""
This is the main loop of the executor. It checks for the next goal to execute.
If there's nothing to execute then it calls _next_execution_batch to check for available tasks.
"""
while not rospy.is_shutdown():
# all encompassing try/catch to make sure this loop does not go down
try:
# try/catch for empty queue
try:
# keep looping until paused or an Empty is thrown
while self.executing and not rospy.is_shutdown():
(mdp_goal, new_active_batch, guarantees) = self.mdp_exec_queue.get(timeout = 1)
sent_goal = False
with self.state_lock:
# always set active batch, but we can correct it later if we don't actually send the goal
self.set_active_batch(deepcopy(new_active_batch))
self.republish_schedule()
# execution status could have changed while acquiring the lock
if self.executing:
self.mdp_exec_client = actionlib.SimpleActionClient('mdp_plan_exec/execute_policy', ExecutePolicyAction)
self.mdp_exec_client.wait_for_server()
# last chance! -- if there was a change during wait
if self.executing:
self.log_task_events((m.task for m in self.active_batch), TaskEvent.TASK_STARTED, rospy.get_rostime(), description = mdp_goal.spec.ltl_task)
self.mdp_exec_client.send_goal(mdp_goal, feedback_cb = self.mdp_exec_feedback)
# this is when we expect navigation to complete by
self.expected_completion_time = self._expected_duration_to_completion_time(guarantees.expected_time)
rospy.loginfo('Sent goal for %s' % mdp_goal.spec.ltl_task)
self.republish_schedule()
for m in self.active_batch:
self.on_demand_active = self.on_demand_active or m.is_on_demand
if self.on_demand_active:
rospy.loginfo('This is an on-demand task')
sent_goal = True
else:
self.mdp_exec_client = None
# indicate that all processing on the task removed from the queue is complete
# this allows join() to work correctly
self.mdp_exec_queue.task_done()
if sent_goal:
final_status = self._wait_for_policy_execution()
with self.state_lock:
# these are left after execution
# remove those tasks which were part of the cancelled set
# print self.to_cancel
active_tasks = []
cancelled_tasks = []
for m in self.active_batch:
# print m.task.task_id
if m.task.task_id in self.to_cancel:
# print 'cancelled'
cancelled_tasks.append(m)
else:
# print 'active'
active_tasks.append(m)
self.active_batch = active_tasks
self.to_cancel = []
# print cancelled_tasks
# print self.active_batch
if len(cancelled_tasks) > 0:
log_string = 'Dropped %s task(s) after execution due to cancellation' % len(cancelled_tasks)
rospy.loginfo(log_string)
self.log_task_events((m.task for m in cancelled_tasks), TaskEvent.DROPPED, rospy.get_rostime(), description = log_string)
remaining_active = len(self.active_batch)
self.on_demand_active = False
# policy execution finished everything
#if final_status == GoalStatus.SUCCEEDED or final_status == GoalStatus.PREEMPTED:
                                if True:  # This way tasks aren't dropped when navigation failures occur. TODO see whether the stuff under the else statement is needed for some cases.
self.deactivate_active_batch(goal_status = final_status)
# here we may have cancelled an overrunning policy or had some other problem
else:
log_string = 'Policy execution exited with status %s, dropping remaining active tasks' % GoalStatus.to_string(final_status)
rospy.loginfo(log_string)
self.log_task_events((m.task for m in self.active_batch), TaskEvent.DROPPED, rospy.get_rostime(), description = log_string)
# todo: is dropping really necessary here? the tasks themselves were not aborted, just policy execution
self.set_active_batch([])
# make sure this can't be used now execution is complete
self.mdp_exec_client = None
# whatever happened or was executed, we should now recheck the available normal tasks
self.recheck_normal_tasks = True
else:
with self.state_lock:
self.deactivate_active_batch(goal_status = GoalStatus.RECALLED, save_all = True)
self.republish_schedule()
except Empty, e:
pass
# state of execution could have changed since the last check
if self.executing:
self._next_execution_batch()
else:
rospy.sleep(1)
except Exception, e:
rospy.logwarn('Caught exception in the mdp_exec loop: %s' % e)
rospy.sleep(1)
# makes publishing thread check for exit
self.republish_schedule()
def set_active_batch(self, batch):
"""
Set the active batch of tasks. Also updates self.active_tasks in the base class
"""
self.active_batch = copy(batch)
self.active_tasks = [m.task for m in self.active_batch]
def start_execution(self):
""" Called when overall execution should (re)start """
rospy.loginfo('(Re-)starting execution')
def deactivate_active_batch(self, goal_status, save_all = False, description = ''):
"""
        Takes the tasks from the active batch and returns them to the appropriate lists for later consideration.
"""
active_count = len(self.active_batch)
now = rospy.get_rostime()
log_string = 'De-activating remaining %s tasks after execution finished with status %s.' % (active_count, GoalStatus.to_string(goal_status))
if active_count > 0:
if save_all:
log_string += ' Saving all back to task list.'
for mdp_task in self.active_batch:
if mdp_task.task.start_after == mdp_task.task.end_before:
self.time_critical_tasks.insert(mdp_task)
else:
self.normal_tasks.insert(mdp_task)
else:
# for each task remaining in the active batch, put it back into the right list
do_not_reactivate_later = []
reactivate_later = []
for mdp_task in self.active_batch:
# we can't monitor the execution of these tasks, so we always assume they're done when deactivated
if mdp_task.is_ltl or mdp_task.is_mdp_spec or mdp_task.is_on_demand:
do_not_reactivate_later.append(mdp_task)
else:
reactivate_later.append(mdp_task)
self.log_task_events((m.task for m in do_not_reactivate_later), self.goal_status_to_task_status(goal_status), now, description = log_string + ' Cannot be reactivated later.')
self.log_task_events((m.task for m in reactivate_later), TaskEvent.TASK_STOPPED, now, description = log_string + ' Saved task to reactivate later')
for mdp_task in reactivate_later:
if mdp_task.task.start_after == mdp_task.task.end_before:
self.time_critical_tasks.insert(mdp_task)
else:
self.normal_tasks.insert(mdp_task)
# empty the active batch. this might mean some feedback misses the update
# the consequence is that the task was completed but we preempted before receiving the update,
# this means the task will be executed again, but there's no easy way around this
self.set_active_batch([])
rospy.loginfo(log_string)
return active_count
def pause_execution(self):
""" Called when overall execution should pause. This is called *before* self.executing is set to False. """
# make sure the queue for execution is empty
self.mdp_exec_queue.join()
with self.state_lock:
self._pause_execution_internal()
# wait for active batch to be empty before return
while not rospy.is_shutdown():
with self.state_lock:
if self.active_batch == []:
return
# print 'waiting for active batch to become empty'
rospy.sleep(0.5)
def _pause_execution_internal(self):
"""
Does the work of pausing execution, without the lock.
"""
# this is done by the super class *after* pause_execution completes, but we need to make sure that it is done before this lock is released to make sure execution does not continue after policy execution preemption
self.executing = False
# If the client is not None then there is execution going on. the active batch could be empty if we've just caught the tail end of execution
#
# Also there could be tasks in the active batch without an action client existing. as the client is created slightly later
if self.mdp_exec_client is not None:
# preempt the action server
self.mdp_exec_client.cancel_all_goals()
rospy.loginfo('Cancelling policy execution')
else:
rospy.loginfo('No policy execution active when pausing')
def task_demanded(self, demanded_task, currently_active_task):
""" Called when a task is demanded. self.active_task is the demanded task (and is being executed) and previously_active_task was the task that was being executed (which could be None) """
with self.state_lock:
prior_execution_state = self.executing
# this cleans up the current execution and sets self.executing to false
self.pause_execution()
# todo: potential race condition -- what happens if someone calls start/pause execution here
with self.state_lock:
# convert the demanded task into an mdp task for policy execution
demanded_mdp_task = self._convert_task_to_mdp_action(demanded_task)
demanded_mdp_task.is_on_demand = True
# and queue it up for execution
mdp_goal = self._mdp_single_task_to_goal(demanded_mdp_task)
# put blocks until the queue is empty, so we guarantee that the queue is empty while we're under lock
tasks = [demanded_mdp_task]
self.mdp_exec_queue.put((mdp_goal, tasks, self._get_guarantees_for_batch(tasks)[1]))
rospy.loginfo('Queued up demanded task: %s' % (demanded_mdp_task.action.name))
self.executing = prior_execution_state
def cancel_active_task(self):
"""
Called to cancel the task which is currently executing.
If something is being executed we handle this by simply pausing and restarting execution.
pause_execution is often called before this. (this is always the case currently)
"""
if self.executing:
# save the current executing tasks to drop later
with self.state_lock:
self.to_cancel = set([m.task.task_id for m in self.active_batch])
self.pause_execution()
with self.state_lock:
self.executing = True
def cancel_task(self, task_id):
""" Called when a request is received to cancel a task. The currently executing one is checked elsewhere. """
rospy.logwarn('Cancelling individual tasks is not yet implemented')
return False
def clear_schedule(self):
""" Called to clear all tasks from schedule, with the exception of the currently executing one. """
with self.state_lock:
prior_execution_state = self.executing
# this cleans up the current execution and sets self.executing to false
self.pause_execution()
# (try to) make sure the queues are empty (there's a chance that between the join and next state_lock that something could be added).
self.mdp_exec_queue.join()
with self.state_lock:
now = rospy.get_rostime()
self.log_task_events((m.task for m in self.normal_tasks), TaskEvent.DROPPED, now, description = 'Schedule was cleared')
self.normal_tasks.clear()
self.log_task_events((m.task for m in self.time_critical_tasks), TaskEvent.DROPPED, now, description = 'Schedule was cleared')
self.time_critical_tasks.clear()
self.executing = prior_execution_state
self.republish_schedule()
rospy.loginfo('All tasks cleared')
def republish_schedule(self):
"""
Notify schedule-publishing thread to update and publish schedule
"""
self.update_schedule_condition.acquire()
self.update_schedule_condition.notify()
self.update_schedule_condition.release()
def publish_schedule(self):
"""
        Loops continuously, publishing the upcoming tasks to be executed.
        It is challenging to produce an exact list of the tasks that will be executed and when, so the compromise is that
ExecutionStatus contains the active batch with their execution_times set to now, all time-critical tasks and the next self.batch_limit normal tasks with their start time set to the end time of the current active batch.
"""
while not rospy.is_shutdown():
# all encompassing try/catch to make sure this loop does not go down
try:
# copy all relevant entries under lock
# we're taking a deepcopy as we might mess around with the times a bit
with self.state_lock:
expected_completion_time = deepcopy(self.expected_completion_time)
active_batch = deepcopy(self.active_batch)
normal_tasks = deepcopy(self.normal_tasks)
time_critical_tasks = deepcopy(self.time_critical_tasks)
now = rospy.get_rostime()
# todo: fill this value better
expected_end_of_batch = rospy.get_rostime() + rospy.Duration(120)
# start from the time_cr
schedule = ExecutionStatus(currently_executing = len(active_batch) > 0)
all_tasks = ExecutionStatus(currently_executing = len(active_batch) > 0)
schedule.header.stamp = now
all_tasks.header.stamp = now
for m in active_batch:
m.task.execution_time = now
schedule.execution_queue.append(m.task)
all_tasks.execution_queue.append(m.task)
# schedule.execution_queue += [m.task for m in time_critical_tasks]
all_tasks.execution_queue += [m.task for m in time_critical_tasks]
all_tasks.execution_queue += [m.task for m in normal_tasks]
all_tasks.execution_queue = sorted(all_tasks.execution_queue, key=lambda x: x.start_after)
all_tasks.execution_queue = sorted(all_tasks.execution_queue, key=lambda x: x.priority)
self.schedule_publisher.publish(schedule)
self.all_tasks_schedule_publisher.publish(all_tasks)
self.update_schedule_condition.acquire()
self.update_schedule_condition.wait()
self.update_schedule_condition.release()
except Exception, e:
rospy.logwarn('Caught exception in publish_schedule loop: %s' % e)
rospy.sleep(1)
if __name__ == '__main__':
executor = MDPTaskExecutor()
rospy.spin()
# create a schedule class which handles blocking until execution and manages the various changes
| mit | -8,600,928,490,053,119,000 | 46.717179 | 306 | 0.572468 | false |
AaronRegan/ObjectTracker | hog.py | 1 | 3796 | # import the necessary packages
from __future__ import print_function
from non_max_suppression import non_max_suppression
from myqueue import myqueue
from frames import frames
from object import Object
import numpy as np
import argparse
import datetime
import imutils
import cv2
import time
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())
if args.get("video", None) is None:
camera = cv2.VideoCapture(0)
# otherwise, we are reading from a video file
else:
print("[INFO] starting video file thread...")
camera = myqueue(args["video"]).start()
time.sleep(1.0)
i = 0
centerX = 0
centerY = 0
objList = []
meas = []
pred = []
mp = np.array((2, 1), np.float32) # measurement
tp = np.zeros((2, 1), np.float32) # tracked / prediction
kalman = cv2.KalmanFilter(4, 2)
kalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
kalman.processNoiseCov = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.03
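# Notes on the filter set up above (descriptive only): the state vector is
# [x, y, vx, vy] and the measurement is [x, y], so the transition matrix encodes
# a constant-velocity model (x += vx, y += vy each step) and the measurement
# matrix simply selects the two position components.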
def onPed(x, y):
global mp, meas
mp = np.array([[np.float32(x)], [np.float32(y)]])
meas.append((x, y))
def kalPredict(mp):
global tp, pred
kalman.correct(mp)
tp = kalman.predict()
pred.append((int(tp[0]), int(tp[1])))
def paint(tp, xA, yA, xB, yB):
global frame, pred
# cv2.circle(frame, ((tp[0]), (tp[1])), 3, (0, 0, 255), -1)
cv2.rectangle(frame, ((tp[0]) - ((xB - xA) / 2), (tp[1]) + (yB - yA) / 2),
(((tp[0]) + ((xB - xA) / 2)), ((tp[1]) - (yB - yA) / 2)), (0, 0, 255), 2)
fps = frames().start()
# initialize the HOG descriptor/person detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
# loop over the image paths
while camera.more():
frame = camera.read()
frame = imutils.resize(frame, width=600)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# start = datetime.datetime.now()
# detect people in the image
(rects, weights) = hog.detectMultiScale(frame, winStride=(8, 8),
padding=(32, 32), scale=1.05)
# print("[INFO] detection took: {}".format(
#(datetime.datetime.now() - start).total_seconds()))
# apply non-maxima suppression to the bounding boxes using a
# fairly large overlap threshold to try to maintain overlapping
# boxes that are still people
rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
# draw the final bounding boxes
for (xA, yA, xB, yB) in pick:
i = i+1
cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
centerX = (xB + xA) / 2
centerY = (yB + yA) / 2
obj = Object(centerX, centerY, i)
objList.append(obj)
onPed(centerX, centerY)
kalPredict(mp)
paint(tp, xA, yA, xB, yB)
cv2.putText(frame, "Queue Size: {}".format(camera.Q.qsize()),
(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
peds_found = "Found " + str(len(pick)) + " Pedestrians"
cv2.putText(frame, peds_found, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
# show the output images
cv2.imshow("HOG", frame)
cv2.waitKey(1)
fps.update()
k = cv2.waitKey(27) & 0xff
if k == 27:
break
fps.stop()
for objects in range(len(objList) - 1):
print(str(objList[objects]))
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
cv2.destroyAllWindows()
camera.stop()
| mit | -1,399,075,531,900,514,000 | 32.59292 | 110 | 0.606164 | false |
bokeh/bokeh | tests/unit/bokeh/sampledata/test_airport_routes.py | 1 | 2266 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from bokeh._testing.util.api import verify_all
# Module under test
#import bokeh.sampledata.airport_routes as bsa # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'airports',
'routes',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.airport_routes", ALL))
@pytest.mark.sampledata
def test_airports(pd) -> None:
import bokeh.sampledata.airport_routes as bsa
assert isinstance(bsa.airports, pd.DataFrame)
# don't check detail for external data
@pytest.mark.sampledata
def test_routes(pd) -> None:
import bokeh.sampledata.airport_routes as bsa
assert isinstance(bsa.routes, pd.DataFrame)
# don't check detail for external data
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | -1,342,589,427,822,301,700 | 34.40625 | 89 | 0.319506 | false |
ReconCell/smacha | smacha/test/smacha_test_examples/random_outcomes.py | 1 | 6029 | #!/usr/bin/env python
import smach
import random
class RandomOutcomeState(smach.State):
def __init__(self, input_keys = ['outcome'], output_keys = ['outcome'], callbacks = {}, outcomes=['succeeded']):
smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=outcomes)
self._cbs = []
if callbacks:
for cb in sorted(callbacks):
if cb in globals():
self._cbs.append(globals()[cb])
elif cb in locals():
self._cbs.append(locals()[cb])
elif cb in dir(self):
self._cbs.append(getattr(self, cb))
self._cb_input_keys = []
self._cb_output_keys = []
self._cb_outcomes = []
for cb in self._cbs:
if cb and smach.has_smach_interface(cb):
self._cb_input_keys.append(cb.get_registered_input_keys())
self._cb_output_keys.append(cb.get_registered_output_keys())
self._cb_outcomes.append(cb.get_registered_outcomes())
self.register_input_keys(self._cb_input_keys[-1])
self.register_output_keys(self._cb_output_keys[-1])
self.register_outcomes(self._cb_outcomes[-1])
def execute(self, userdata):
# Call callbacks
for (cb, ik, ok) in zip(self._cbs,
self._cb_input_keys,
self._cb_output_keys):
# Call callback with limited userdata
try:
cb_outcome = cb(self, smach.Remapper(userdata,ik,ok,{}))
except:
cb_outcome = cb(smach.Remapper(userdata,ik,ok,{}))
return userdata.outcome
class CallbacksState(smach.State):
def __init__(self, input_keys=[], output_keys=[], callbacks=[]):
smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=['succeeded'])
self._cbs = []
if callbacks:
for cb in sorted(callbacks):
if cb in globals():
self._cbs.append(globals()[cb])
elif cb in locals():
self._cbs.append(locals()[cb])
elif cb in dir(self):
self._cbs.append(getattr(self, cb))
self._cb_input_keys = []
self._cb_output_keys = []
self._cb_outcomes = []
for cb in self._cbs:
if cb and smach.has_smach_interface(cb):
self._cb_input_keys.append(cb.get_registered_input_keys())
self._cb_output_keys.append(cb.get_registered_output_keys())
self._cb_outcomes.append(cb.get_registered_outcomes())
self.register_input_keys(self._cb_input_keys[-1])
self.register_output_keys(self._cb_output_keys[-1])
self.register_outcomes(self._cb_outcomes[-1])
def execute(self, userdata):
# Call callbacks
for (cb, ik, ok) in zip(self._cbs,
self._cb_input_keys,
self._cb_output_keys):
# Call callback with limited userdata
try:
cb_outcome = cb(self, smach.Remapper(userdata,ik,ok,{}))
except:
cb_outcome = cb(smach.Remapper(userdata,ik,ok,{}))
return 'succeeded'
@smach.cb_interface(input_keys=[],
output_keys=['outcome'],
outcomes=['foo_0', 'foo_1', 'foo_2'])
def outcome_randomize_lambda_cb(self, userdata):
lambda_cb = lambda ud: random.choice(list(self._outcomes))
userdata.outcome = lambda_cb(userdata)
return 'succeeded'
RandomOutcomeState.outcome_randomize_lambda_cb = outcome_randomize_lambda_cb
@smach.cb_interface(input_keys=[],
output_keys=['outcome'],
outcomes=[])
def outcome_foo_0_lambda_cb(self, userdata):
lambda_cb = lambda ud: random.choice(list(self._outcomes))
userdata.outcome = lambda_cb(userdata)
return 'succeeded'
CallbacksState.outcome_foo_0_lambda_cb = outcome_foo_0_lambda_cb
@smach.cb_interface(input_keys=[],
output_keys=['outcome'],
outcomes=[])
def outcome_foo_1_lambda_cb(self, userdata):
lambda_cb = lambda ud: random.choice(list(self._outcomes))
userdata.outcome = lambda_cb(userdata)
return 'succeeded'
CallbacksState.outcome_foo_1_lambda_cb = outcome_foo_1_lambda_cb
@smach.cb_interface(input_keys=[],
output_keys=['outcome'],
outcomes=[])
def outcome_foo_2_lambda_cb(self, userdata):
lambda_cb = lambda ud: random.choice(list(self._outcomes))
userdata.outcome = lambda_cb(userdata)
return 'succeeded'
CallbacksState.outcome_foo_2_lambda_cb = outcome_foo_2_lambda_cb
def main():
sm = smach.StateMachine(outcomes=['final_outcome'])
with sm:
smach.StateMachine.add('RANDOMIZE',
RandomOutcomeState(callbacks = ['outcome_randomize_lambda_cb'], outcomes=['foo_0', 'foo_1', 'foo_2']),
transitions={'foo_0':'FOO_0',
'foo_1':'FOO_1',
'foo_2':'FOO_2'})
smach.StateMachine.add('FOO_0',
CallbacksState(callbacks = ['outcome_foo_0_lambda_cb']),
transitions={'succeeded':'RANDOMIZE'})
smach.StateMachine.add('FOO_1',
CallbacksState(callbacks = ['outcome_foo_1_lambda_cb']),
transitions={'succeeded':'RANDOMIZE'})
smach.StateMachine.add('FOO_2',
CallbacksState(callbacks = ['outcome_foo_2_lambda_cb']),
transitions={'succeeded':'final_outcome'})
outcome = sm.execute()
if __name__ == '__main__':
main() | bsd-3-clause | -3,053,937,580,244,111,000 | 35.107784 | 141 | 0.532427 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.1/Lib/distutils/command/build_clib.py | 1 | 8098 | """distutils.command.build_clib
Implements the Distutils 'build_clib' command, to build a C/C++ library
that is included in the module distribution and needed by an extension
module."""
__revision__ = "$Id: build_clib.py 72379 2009-05-06 07:26:24Z tarek.ziade $"
# XXX this module has *lots* of code ripped-off quite transparently from
# build_ext.py -- not surprisingly really, as the work required to build
# a static library from a collection of C source files is not really all
# that different from what's required to build a shared object file from
# a collection of C source files. Nevertheless, I haven't done the
# necessary refactoring to account for the overlap in code between the
# two modules, mainly because a number of subtle details changed in the
# cut 'n paste. Sigh.
import os
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler
from distutils import log
def show_compilers():
from distutils.ccompiler import show_compilers
show_compilers()
class build_clib(Command):
description = "build C/C++ libraries used by Python extensions"
user_options = [
('build-clib', 'b',
"directory to build C/C++ libraries to"),
('build-temp', 't',
"directory to put temporary build by-products"),
('debug', 'g',
"compile with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options(self):
self.build_clib = None
self.build_temp = None
# List of libraries to build
self.libraries = None
# Compilation options for all libraries
self.include_dirs = None
self.define = None
self.undef = None
self.debug = None
self.force = 0
self.compiler = None
def finalize_options(self):
# This might be confusing: both build-clib and build-temp default
# to build-temp as defined by the "build" command. This is because
# I think that C libraries are really just temporary build
# by-products, at least from the point of view of building Python
# extensions -- but I want to keep my options open.
self.set_undefined_options('build',
('build_temp', 'build_clib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'))
self.libraries = self.distribution.libraries
if self.libraries:
self.check_library_list(self.libraries)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
# XXX same as for build_ext -- what about 'self.define' and
# 'self.undef' ?
def run(self):
if not self.libraries:
return
# Yech -- this is cut 'n pasted from build_ext.py!
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
self.build_libraries(self.libraries)
def check_library_list(self, libraries):
"""Ensure that the list of libraries is valid.
`library` is presumably provided as a command option 'libraries'.
This method checks that it is a list of 2-tuples, where the tuples
are (library_name, build_info_dict).
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise.
"""
if not isinstance(libraries, list):
raise DistutilsSetupError(
"'libraries' option must be a list of tuples")
for lib in libraries:
if not isinstance(lib, tuple) and len(lib) != 2:
raise DistutilsSetupError(
"each element of 'libraries' must a 2-tuple")
name, build_info = lib
if not isinstance(name, str):
raise DistutilsSetupError(
"first element of each tuple in 'libraries' "
"must be a string (the library name)")
if '/' in name or (os.sep != '/' and os.sep in name):
raise DistutilsSetupError("bad library name '%s': "
"may not contain directory separators" % lib[0])
if not isinstance(build_info, dict):
raise DistutilsSetupError(
"second element of each tuple in 'libraries' "
"must be a dictionary (build info)")
def get_library_names(self):
# Assume the library list is valid -- 'check_library_list()' is
# called from 'finalize_options()', so it should be!
if not self.libraries:
return None
lib_names = []
for (lib_name, build_info) in self.libraries:
lib_names.append(lib_name)
return lib_names
def get_source_files(self):
self.check_library_list(self.libraries)
filenames = []
for (lib_name, build_info) in self.libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % lib_name)
filenames.extend(sources)
return filenames
def build_libraries(self, libraries):
for (lib_name, build_info) in libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % lib_name)
sources = list(sources)
log.info("building '%s' library", lib_name)
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(objects, lib_name,
output_dir=self.build_clib,
debug=self.debug)
| mit | -2,797,674,694,629,372,000 | 37.379147 | 76 | 0.568782 | false |
francescomarucci/VectorGeoreferencer | __init__.py | 1 | 1403 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
VectorGeoreferencer
A QGIS plugin
Deforms vector to adapt them despite heavy and irregular deformations
-------------------
begin : 2017-05-14
copyright : (C) 2017 by Francesco Marucci
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
def classFactory(iface):
# load VectorGeoreferencer class from file VectorGeoreferencer
from vectorgeoreferencer import VectorGeoreferencer
return VectorGeoreferencer(iface)
| gpl-2.0 | 1,166,935,515,956,048,400 | 52.961538 | 77 | 0.404847 | false |
illagrenan/django-make-app | django_make_app/renderers.py | 1 | 1282 | # -*- encoding: utf-8 -*-
# ! python2
from __future__ import (absolute_import, division, print_function, unicode_literals)
from jinja2 import FileSystemLoader, Environment
class TemplateRenderer(object):
def __init__(self, templates_directory, template_name, item):
"""
:type templates_directory: os.path
:type template_name: unicode
:type item: dict
"""
self._templates_directory = templates_directory
self.template_name = template_name
self.item = item
def _render_from_template(self, template_name, **kwargs):
loader = FileSystemLoader(self._templates_directory)
env = Environment(loader=loader)
template = env.get_template(template_name)
render = template.render(**kwargs)
render = render.replace("[[", "{{")
render = render.replace("]]", "}}")
render = render.replace("[%", "{%")
render = render.replace("%]", "%}")
return render
def render(self, context):
"""
:type context: dict
"""
if "_model" in self.item:
context.update({
"current_model": self.item.get('_model')
})
return self._render_from_template(self.template_name, **context)
| mit | -4,592,002,837,051,133,000 | 27.488889 | 84 | 0.583463 | false |
clarete/curdling | curdling/services/base.py | 1 | 3236 | from __future__ import absolute_import, print_function, unicode_literals
from ..signal import Signal, SignalEmitter
from ..util import logger
from distlib.compat import queue
import sys
import threading
import time
import traceback
# See `Service._worker()`. This is the sentinel that gently stops the iterator
# over there.
SENTINEL = (None, {})
# Number of threads that a service will spawn by default.
DEFAULT_CONCURRENCY = 2
class Service(SignalEmitter):
def __init__(self, size=DEFAULT_CONCURRENCY, **args):
super(Service, self).__init__()
self.size = size
self.env = args.get('env')
self.conf = args.pop('conf', {})
self.index = args.pop('index', None)
self.logger = logger(__name__)
# Components to implement the thread pool
self._queue = queue.Queue()
self.pool = []
# Declaring signals
self.started = Signal()
self.finished = Signal()
self.failed = Signal()
def queue(self, requester, **data):
self.logger.debug('%s.queue(from="%s", data="%s")', self.name, requester, data)
self._queue.put((requester, data))
return self
def start(self):
self.logger.debug('%s.start()', self.name)
for _ in range(self.size):
worker = threading.Thread(target=self._worker)
worker.daemon = True
worker.start()
self.pool.append(worker)
return self
def join(self):
# We need to separate loops cause we can't actually tell which thread
# got each sentinel
for worker in self.pool:
self._queue.put(SENTINEL)
for worker in self.pool:
worker.join()
self.workers = []
def handle(self, requester, sender_data):
raise NotImplementedError(
"The service subclass should override this method")
def __call__(self, requester, **kwargs):
return self.handle(requester, kwargs)
# -- Private API --
def _worker(self):
name = '{0}[{1}]'.format(self.name, threading.current_thread().name)
# If the service consumer invokes `.queue(None, None)` it causes the
# worker to die elegantly by matching the following sentinel:
for requester, sender_data in iter(self._queue.get, SENTINEL):
self.logger.debug('%s.run(data="%s")', name, sender_data)
try:
self.emit('started', self.name, **sender_data)
result = self(requester, **sender_data) or {}
self._queue.task_done()
except BaseException:
fname, lineno, fn, text = traceback.extract_tb(sys.exc_info()[2])[0]
self.logger.exception(
'%s.run(from="%s", data="%s") failed:\n'
'%s:%d (%s) %s',
name, requester, sender_data,
fname, lineno, fn, text,
)
sender_data.update(exception=sys.exc_info()[1])
self.emit('failed', self.name, **sender_data)
else:
self.logger.debug('%s.run(data="%s"): %s', name, sender_data, result)
self.emit('finished', self.name, **result)
| gpl-3.0 | -2,457,431,602,473,947,000 | 33.425532 | 87 | 0.57293 | false |
pazagra/catkin_ws | src/RGBDHand/src/Planeees.py | 1 | 4011 | import roslib
import rospy
import sys
import timeit
import os
import Image_Process
import cv2
import cv2.cv as cv
import numpy as np
import BoW
import Analysis
import Segmentation
from sklearn import cluster
import Descriptors
class Learn:
def __init__(self):
path = "/media/iglu/Data/Dataset/DatasetIglu/Dataset_united/Ana_point/"
i = 0
Seg = Segmentation.Segmentation()
f = open(path+"List.txt",'r')
for line in f:
print "Starting Training"
start_time = timeit.default_timer()
# code you want to evaluate
Time = line
file1 = next(f).rstrip('\n')
file2 = next(f).rstrip('\n')
Label = next(f).rstrip('\n')
RGB = cv2.imread(path+"RGB/"+file1) #[:-4]+"_RGB.jpg"
Depth = cv2.imread(path+"Depth/"+file2) #[:-4]+"_Depth.png"
Mesh = []
for i in xrange(0, Depth.shape[0]):
for j in xrange(0, Depth.shape[1]):
Mesh.append((i, j, Depth[i][j][0]))
kmeans = cluster.KMeans(Mesh,8)
print kmeans.n_clusters
# Depthv = self.Inpaint(Depth)
# Seg.CreateCloud(Depth)
# L = Seg.PlaneSegmentation()
# image = np.zeros(RGB.shape, RGB.dtype)
# depth2 = np.zeros(Depth.shape, Depth.dtype)
# for data in L:
# image[int(data[0]),int(data[1])] = RGB[int(data[0]),int(data[1])]
# depth2[int(data[0]), int(data[1])] = Depth[int(data[0]), int(data[1])]
# print Depth[int(data[0]), int(data[1])]
# Seg.CreateCloud(depth2)
# L = Seg.PlaneSegmentation()
# image2 = np.zeros(image.shape, image.dtype)
# depth3 = np.zeros(depth2.shape, depth2.dtype)
# for data in L:
# image2[int(data[0]),int(data[1])] = image[int(data[0]),int(data[1])]
# depth3[int(data[0]), int(data[1])] = depth2[int(data[0]), int(data[1])]
# print Depth[int(data[0]), int(data[1])]
elapsed = timeit.default_timer() - start_time
print "Tiempo: " + elapsed.__str__()
cv2.imshow("RGB", RGB )
cv2.imshow("Depthv", depth2)
cv2.waitKey(0) & 0xFF
cv2.destroyAllWindows()
# print "Enviado "+file+" "+i.__str__()
# if i >150:
# break
def Inpaintv1(self,Depth):
Depth_Small = Depth
Temp2 = Depth
x1 = int(Depth.shape[0] * 0.2)
x2 = int(Depth.shape[1] * 0.2)
x3 = Depth.shape[2]
cv2.resize(Depth, (x1, x2), Depth_Small)
Temp = Depth_Small
mask = (Depth_Small == 0)
zeros = np.zeros(Depth_Small.shape, Depth_Small.dtype)
ones = np.ones(Depth_Small.shape, Depth_Small.dtype)
ones *= 255
maskk = np.where(mask == True, ones, zeros)
maskk = maskk[:, :, 0]
cv2.inpaint(Depth_Small, maskk, 10.0, cv2.INPAINT_TELEA, Temp)
cv2.resize(Temp, (Depth.shape[0], Depth.shape[1]), Temp2)
return Temp2
def Inpaint(self,Depth):
Depth_Small = Depth
Temp2 = Depth
Temp = Depth_Small
mask = (Depth_Small == 0)
zeros = np.zeros(Depth_Small.shape, Depth_Small.dtype)
ones = np.ones(Depth_Small.shape, Depth_Small.dtype)
ones *= 255
maskk = np.where(mask == True, ones, zeros)
maskk = maskk[:, :, 0]
cv2.inpaint(Depth_Small, maskk, 30.0, cv2.INPAINT_TELEA, Temp)
Temp2 = Temp
return Temp2
def main(args):
# Learn()
oll = np.zeros(25)
list = ['glass','Nesquik cereales','fork','bowl','Milk Box','Coke','plate','Heinz ketchup','Apple','lime','orange','Green tea box (Lipton)','cofee mug','Special K','water bottle','banana','Bote Pringles','pitcher','kleenex box','Spoon','Diet Coke','Mayonaise Heinz','Instant noodles','knife','lemon']
if __name__ == '__main__':
main(sys.argv) | gpl-3.0 | 117,786,366,276,852,400 | 37.576923 | 304 | 0.540763 | false |
carlosperate/LightUpPi-Alarm | LightUpHardware/test/HardwareSwitch_test.py | 1 | 4048 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Unit test for the HardwareSwitch module.
#
# Copyright (c) 2015 carlosperate https://github.com/carlosperate/
#
# Licensed under The MIT License (MIT), a copy can be found in the LICENSE file
#
# These test require the Wemo Switch to be on the network at the defined IP
# address.
#
from __future__ import unicode_literals, absolute_import
import io
import mock
import unittest
from time import sleep
try:
import LightUpHardware.HardwareSwitch as HardwareSwitch
from LightUpHardware.pywemoswitch.WemoSwitch import WemoSwitch
except ImportError:
import os
import sys
file_dir = os.path.dirname(os.path.realpath(__file__))
package_dir = os.path.dirname(os.path.dirname(file_dir))
sys.path.insert(0, package_dir)
import LightUpHardware.HardwareSwitch as HardwareSwitch
from LightUpHardware.pywemoswitch.WemoSwitch import WemoSwitch
class HardwareSwitchTestCase(unittest.TestCase):
"""
Tests for HardwareSwitch functions.
These test require the Wemo Switch to be on the network at the defined IP
address.
"""
#
# Helper methods
#
def assert_stderr(self, test_srderr, equal=False):
""" Checks the stderr error string and resets it for next test. """
if equal is True:
self.assertEqual(test_srderr.getvalue(), '')
else:
self.assertNotEqual(test_srderr.getvalue(), '')
test_srderr.truncate(0)
test_srderr.write('')
self.assertEqual(test_srderr.getvalue(), '')
#
# Tests
#
def test__get_switch(self):
"""
Tests if an error is set when a switch cannot be connected. Due to the
connection timeout this test can take several seconds to complete.
"""
# We capture stderr to check for invalid input IP
with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
# Invalid _coffee_switch_name causes to print an error
switch = HardwareSwitch._get_switch('127.0.0.1')
self.assert_stderr(test_srderr)
self.assertIsNone(switch)
# Test that the default IP returns a connected switch instance
switch = HardwareSwitch._get_switch()
self.assertEqual(type(switch), WemoSwitch)
def test_switch_on_off(self):
"""
Tests the switch Turns ON and OFF with the default input and a given
switch.
"""
state = HardwareSwitch.switch_on()
self.assertTrue(state)
sleep(1)
state = HardwareSwitch.switch_off()
self.assertFalse(state)
switch = HardwareSwitch._get_switch()
state = HardwareSwitch.switch_on(switch)
self.assertTrue(state)
sleep(1)
state = HardwareSwitch.switch_off(switch)
self.assertFalse(state)
def test_safe_on(self):
""" Tests the default switch Turns ON only if already ON. """
switch = HardwareSwitch._get_switch()
switch_is_on = switch.get_state()
if switch_is_on is True:
switch.turn_off()
switch_is_on = switch.get_state()
self.assertFalse(switch_is_on)
HardwareSwitch.safe_on()
switch_is_on = switch.get_state()
self.assertTrue(switch_is_on)
# We capture stderr to check for swtich already ON when called and
# mock the turn off method to check if it was called
with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
with mock.patch('LightUpHardware.pywemoswitch.WemoSwitch') as \
mock_switch:
self.assert_stderr(test_srderr, True)
HardwareSwitch.safe_on()
self.assertEqual(mock_switch.turn_off.call_count, 0)
self.assert_stderr(test_srderr)
switch_is_on = switch.get_state()
self.assertTrue(switch_is_on)
# to clean up, turn the switch off
sleep(1)
switch.turn_off()
if __name__ == '__main__':
unittest.main()
| mit | -2,191,243,379,043,929,300 | 33.305085 | 79 | 0.63414 | false |
shub0/algorithm-data-structure | python/BST_iterator.py | 1 | 1180 | #! /usr/bin/python
'''
Implement an iterator over a binary search tree (BST). Your iterator will be initialized with the root node of a BST.
Calling next() will return the next smallest number in the BST.
Note: next() and hasNext() should run in average O(1) time and uses O(h) memory, where h is the height of the tree.
'''
from node_struct import TreeNode
class BSTIterator:
# @param root, a binary search tree's root node
def __init__(self, root):
self.root = root
self.path = list()
next_node = root
while next_node:
self.path.append(next_node)
next_node = next_node.left
# @return a boolean, whether we have a next smallest number
def hasNext(self):
return len(self.path) > 0
# @return an integer, the next smallest number
def next(self):
res = self.path.pop()
if res.right:
next_node = res.right
while next_node:
self.path.append(next_node)
next_node = next_node.left
return res.val
# Your BSTIterator will be called like this:
# i, v = BSTIterator(root), []
# while i.hasNext(): v.append(i.next())
| bsd-3-clause | 5,504,190,055,761,006,000 | 30.052632 | 117 | 0.620339 | false |
Loudr/pale | pale/endpoint.py | 1 | 25122 | # -*- coding: utf-8 -*-
import datetime
import json
import logging
import sys
import threading
import arrow
from pale import config as pale_config
from pale.arguments import BaseArgument
from pale.fields import ResourceField, ListField, ResourceListField
from pale.errors import APIError, ArgumentError, AuthenticationError
from pale.meta import MetaHasFields
from pale.resource import NoContentResource, Resource, DebugResource
from pale.response import PaleRaisedResponse
_tls = threading.local()
def get_current_context():
"""Return the context associated with the current request."""
return _tls.current_context
def set_current_context(context):
"""Set the context associated with the current request."""
_tls.current_context = context
class PaleDefaultJSONEncoder(json.JSONEncoder):
"""The default JSON Encoder for Pale.
The main difference between this and Python's default JSON encoder
is that this encoder attempts to serialize datetimes to ISO format,
and tries to call a `to_dict` method on the passed in object before
giving up.
"""
def default(self, obj):
"""Default JSON encoding."""
try:
if isinstance(obj, datetime.datetime):
# do the datetime thing, or
encoded = arrow.get(obj).isoformat()
else:
# try the normal encoder
encoded = json.JSONEncoder.default(self, obj)
except TypeError as e:
# if that fails, check for the to_dict method,
if hasattr(obj, 'to_dict') and callable(obj.to_dict):
# and use it!
encoded = obj.to_dict()
else:
raise e
return encoded
class Endpoint(object):
"""Base-class for implemented Endpoints."""
__metaclass__ = MetaHasFields
_response_class = None
_json_serializer = PaleDefaultJSONEncoder()
_default_cache = 'no-cache'
@classmethod
def _fix_up_fields(cls):
"""Add names to all of the Endpoint's Arguments.
This method will get called on class declaration because of
Endpoint's metaclass. The functionality is based on Google's NDB
implementation."""
cls._arguments = dict()
if cls.__module__ == __name__: # skip the classes in this file
return
for name in set(dir(cls)):
attr = getattr(cls, name, None)
if isinstance(attr, BaseArgument):
if name.startswith('_'):
raise TypeError("Endpoint argument %s cannot begin with "
"an underscore, as these attributes are reserved "
"for instance variables of the endpoint object, "
"rather than for arguments to your HTTP Endpoint."
% name)
attr._fix_up(cls, name)
cls._arguments[attr.name] = attr
def _set_response_class(self, response_class):
"""Set the response class for this endpoint.
This is usually only called by the Pale adapter,
and intended to be called with the Response object
of the HTTP layer that you're using.
"""
self._response_class = response_class
@classmethod
def _set_json_serializer(cls, serializer):
cls._json_serializer = serializer
@classmethod
def _metadata(cls, *args, **kwargs):
return dict(**kwargs)
def _handle(self, context):
"""The meat of the API logic.
This method is intended to be overridden by subclasses,
and should perform the core logic of the API method in question.
"""
pass
def _finally(self):
"""Executed after the success, or failure, of _execute()."""
pass
def _execute(self, request, **kwargs):
"""The top-level execute function for the endpoint.
This method is intended to remain as-is, and not be overridden.
It gets called by your HTTP framework's route handler, and performs
the following actions to process the request:
``authenticate_request``
Validate the Bearer token, populate the ``current_user``, and make
sure that the token covers the scope needed to call the requested
method.
*
*
``parse arguments``
The argument parser is responsible for:
- First, coercing and patching any parameters that might require
it due to versioning (i.e. the caller is using an old API
version that supports `index` as a parameter for pagination,
but the current version uses the name `offset`)
- Second, iterating through the endpoint's supported arguments
and validating that the params passed in comply with the
endpoint's requirements
- Third, populating the `context.args` array with the validated
arguments
If any of the arguments are invalid, then the Argument parser will
raise an ArgumentError that bubbles up to the `try/catch` block of
the execute method.
*
*
``before handler``
The before_handlers are specified by the Endpoint definition, and
are intended to supporty DRY-ing up your codebase. Have a set of
Endpoints that all need to grab an object from the ORM based on the
same parameter? Make them inherit from an Endpoint subclass that
performs that task in a before_handler!
*
*
``handle``
The core logic of your API endpoint, as implemented by you in your
Endpoint subclass. The API Framework expects ``handle`` to return
a dictionary specifying the response object and the JSON key that
it should hang off of, or a tuple of a dictionary and an HTTP status
code.
*
*
``after_handler``
Like the before_handlers, the ``after_handlers`` happen after the
handle method, and allow the endpoint developer to re-use code for
post-processing data from an endpoint.
*
*
``render response``
Like the argument parser, the response renderer is responsible for
a few things:
- First, it converts the ORM objects into JSON-serializable
Python dictionaries using the Resource objects defined by the
API implementation,
- Second, it does any version parameter coersion, renaming and
reformatting the edge version of the response to match the
version requested by the API caller,
- and Third, it serializes the Python dictionary into the response
format requested by the API caller (right now, we only support
JSON responses, but it'd be reasonble to support something like
HTML or XML or whatever in the future).
The rendered JSON text is then returned as the response that should
be sent by your HTTP framework's routing handler.
*
*
``_after_response_handler``
The `_after_response_handlers` are specified by the Endpoint
definition, and enable manipulation of the response object before it
is returned to the client, but after the response is rendered.
Because these are instancemethods, they may share instance data
from `self` specified in the endpoint's `_handle` method.
``_finalize_content``
The `_finalize_content` method is overridden by the Endpoint and is called
after the response is rendered into a serializable result.
This method is called with two arguments, the context and the rendered content,
and expected to return updated rendered content.
For in-place modification of dicts, this method will still be expected
to return the given argument.
``_allow_cors``
This value is set to enable CORs for a given endpoint.
When set to a string it supplies an explicit value to
'Access-Control-Allow-Origin'.
Set to True, this will allow access from *all* domains;
Access-Control-Allow-Origin = "*"
"""
try:
self._create_context(request)
self._authenticate()
context = get_current_context()
self._parse_args()
if hasattr(self, '_before_handlers') and \
isinstance(self._before_handlers, (list, tuple)):
for handler in self._before_handlers:
handler(context)
context.handler_result = self._handle(context)
if hasattr(self, '_after_handlers') and \
isinstance(self._after_handlers, (list, tuple)):
for handler in self._after_handlers:
handler(context)
self._render()
response = context.response
# After calling ._render(), the response is ready to go, so we
# shouldn't need to handle any other exceptions beyond this point.
except AuthenticationError as e:
if hasattr(e, 'message') and e.message is not None:
message = e.message
else:
message = "You don't have permission to do that."
err = APIError.Forbidden(message)
response = self._response_class(*err.response)
response.headers["Content-Type"] = 'application/json'
except ArgumentError as e:
err = APIError.UnprocessableEntity(e.message)
response = self._response_class(*err.response)
response.headers["Content-Type"] = 'application/json'
except APIError as e:
response = self._response_class(*e.response)
response.headers["Content-Type"] = 'application/json'
except PaleRaisedResponse as r:
response = self._response_class(*r.response)
response.headers["Content-Type"] = 'application/json'
except Exception as e:
logging.exception("Failed to handle Pale Endpoint %s: %r", self.__class__.__name__,
e)
err = APIError.Exception(repr(e))
response = self._response_class(*err.response)
response.headers["Content-Type"] = 'application/json'
allow_cors = getattr(self, "_allow_cors", None)
if allow_cors is True:
response.headers['Access-Control-Allow-Origin'] = '*'
elif isinstance(allow_cors, basestring):
response.headers['Access-Control-Allow-Origin'] = allow_cors
context.response = response
try:
if hasattr(self, '_after_response_handlers') and \
isinstance(self._after_response_handlers, (list, tuple)):
for handler in self._after_response_handlers:
handler(context, response)
except Exception as e:
logging.exception(
"Failed to process _after_response_handlers for Endpoint %s",
self.__class__.__name__)
raise
return response
def _create_context(self, request):
if pale_config.create_context is None:
raise ValueError((
"\n\nPale does not appear to be configured, as there is no "
"context creator currently set!\n\n"))
context = pale_config.create_context(self, request)
set_current_context(context)
def _authenticate(self):
if pale_config.authenticate_context is None:
raise ValueError((
"\n\nPale does not appear to be configured, as there is no "
"context authenticator currently set!\n\n"))
pale_config.authenticate_context(get_current_context())
def _patch_args(self):
# do something like:
# version = context.api_version
# coersion_dict = self.grab_version_coersion_info_from_impl(version)
# self.patched_args = self.coerce(self._raw_args, coersion_dict)
# but for now, just push the raw args through
context = get_current_context()
context.patched_args = context._raw_args
def _parse_args(self):
context = get_current_context()
self._patch_args()
parsed_args = dict()
if self._arguments is not None:
if not isinstance(self._arguments, dict):
raise ValueError("""Your API implementation is broken. This
endpoint's `arguments` value is a `%s` when it should be a dict
instead. Please see the Pale documentation for information on
how to fix the problem.""" % (type(self.arguments), ))
for arg_name, arg_obj in self._arguments.iteritems():
patched_value = context.patched_args.get(arg_name, None)
# HTTP libraries are crap, so we expect `patched_value` to
# be a list, which we strip out if the length is 1 and if the
# validator doesn't expect a list
if patched_value is not None and \
isinstance(patched_value, list) and \
len(patched_value) == 1 and \
list not in arg_obj.allowed_types:
patched_value = patched_value[0]
# validate will return the validated (and thus valid) value on
# success, or raise an ArgumentError if the value is invalid
validated_value = arg_obj.validate(patched_value, arg_name)
if validated_value is not None:
parsed_args[arg_name] = validated_value
context.args = parsed_args
def _parse_handler_result(self, result):
"""Parses the item(s) returned by your handler implementation.
Handlers may return a single item (payload), or a tuple that gets
passed to the Response class __init__ method of your HTTP layer.
_parse_handler_result separates the payload from the rest the tuple,
as well as providing the tuple so that it can be re-composed after
the payload has been run through the `_returns` Resource's renderer.
"""
if isinstance(result, (list, tuple)):
payload = result[0]
list_result = list(result)
else:
payload = result
list_result = [""]
return payload, list_result
def _render(self):
# first, serialize the Python objects in the response_dict into a dict
context = get_current_context()
rendered_content = dict()
unrendered_content, response_init_list = self._parse_handler_result(
context.handler_result)
if hasattr(unrendered_content, 'iteritems'):
for k, v in unrendered_content.iteritems():
# usually there should only be one key and value here
dict_val = self._returns._render_serializable(v, context)
# this is where object versioning should be implemented, but
# one outstanding question with it is, should this be the
# responsibility of the Resource object, or of the endpoint?
# coerced_dict_val = self.returns.versionify(dict_val,
# context.api_version)
rendered_content[k] = dict_val
else:
# maybe it's a nonetype or a simple string?
rendered_content = self._returns._render_serializable(
unrendered_content, context)
try:
if hasattr(self, '_finalize_content'):
rendered_content = self._finalize_content(context, rendered_content)
except:
logging.exception("Failed to complete %s._finalize_content",
self.__class__.__name__)
raise
# now build the response
if rendered_content is None and \
isinstance(self._returns, NoContentResource):
json_content = ''
else:
json_content = self._json_serializer.encode(rendered_content)
response_init_list[0] = json_content
response_init_tuple = tuple(response_init_list)
if self._response_class is None:
raise ValueError("""Error with Pale configuration. Attempted to
parse a handler result without a response class set on the endpoint.
This is probably an issue with the pale HTTP adapter you're using,
since that is where the response class is usually set.""")
context.response = self._response_class(*response_init_tuple)
# patch up cache-control
updated_cache_ctrl_from_endpoint = False
if len(response_init_tuple) > 2:
# headers is the 3rd arg for both flask and webapp2
headers = response_init_tuple[2]
cache_ctrl = headers.get('Cache-Control')
if cache_ctrl is not None:
context.response.headers['Cache-Control'] = cache_ctrl
updated_cache_ctrl_from_endpoint = True
if not updated_cache_ctrl_from_endpoint:
context.response.headers['Cache-Control'] = \
self._default_cache
# Add default json response type.
if len(json_content):
context.response.headers["Content-Type"] = 'application/json'
else:
del context.response.content_type
del context.response.content_length
status_code = getattr(context.response, "status_int", None) or context.response.status_code
if status_code == 200: # 200 OK
context.response.status = '204 No Content'
class ResourcePatch(object):
"""Represents a resource patch which is to be applied
to a given dictionary or object."""
def __init__(self, patch, resource, ignore_missing_fields=False):
self.patch = patch
self.resource = resource
self.ignore_missing_fields = ignore_missing_fields
def get_field_from_resource(self, field):
if isinstance(self.resource, DebugResource):
# no fields defined in a DebugResource
return None
try:
return self.resource._fields[field]
except KeyError:
if not self.ignore_missing_fields:
raise APIError.BadRequest(
"Field '%s' is not expected." % field)
return None
def get_resource_from_field(self, field):
assert isinstance(field, ResourceField)
return field.resource_type()
def cast_value(self, field, value):
if isinstance(field, ResourceListField):
if not isinstance(value, dict):
raise APIError.BadRequest(
"Expected nested object in list for %s" % field)
try:
resource = field.resource_type()
if isinstance(resource, DebugResource):
return value.copy()
new_object = dict()
for k,v in value.iteritems():
if not k in resource._fields and self.ignore_missing_fields:
new_object[k] = v
continue
_field = resource._fields[k]
if _field.property_name is not None:
k = _field.property_name
new_object[k] = self.cast_value(_field, v)
if not getattr(resource, "_underlying_model", None):
return new_object
return resource._underlying_model(**new_object)
except Exception:
logging.exception(
"Failed to cast value to _underlying_model of resource_type: %s" %
getattr(field, 'resource_type', None))
raise
# TODO: Use field to cast field back into a value,
# if possible.
return value
def apply_to_dict(self, dt):
for k,v in self.patch.iteritems():
field = self.get_field_from_resource(k)
if field is None:
dt[k] = v
continue
elif isinstance(v, dict):
# Recursive application.
resource = self.get_resource_from_field(field)
patch = ResourcePatch(v, resource,
ignore_missing_fields=self.ignore_missing_fields)
patch.apply_to_dict(dt[k])
elif isinstance(v, list):
if (not isinstance(field, ResourceListField) and
not isinstance(field, ListField)):
raise APIError.BadRequest(
"List not expected for field '%s'" % k)
new_list = []
for itm in v:
new_list.append(self.cast_value(field, itm))
dt[k] = new_list
else:
# Cast value and store
dt[k] = self.cast_value(field, v)
def apply_to_model(self, dt):
for k,v in self.patch.iteritems():
field = self.get_field_from_resource(k)
if field is None:
setattr(dt, k, v)
elif isinstance(v, dict):
# Recursive application.
resource = self.get_resource_from_field(field)
patch = ResourcePatch(v, resource,
ignore_missing_fields=self.ignore_missing_fields)
patch.apply_to_model(getattr(dt, k, None))
elif isinstance(v, list):
if (not isinstance(field, ResourceListField) and
not isinstance(field, ListField)):
raise APIError.BadRequest(
"List not expected for field '%s'" % k)
new_list = []
for itm in v:
new_list.append(self.cast_value(field, itm))
setattr(dt, k, new_list)
else:
# Cast value and set
setattr(dt, k, self.cast_value(field, v))
class PatchEndpoint(Endpoint):
"""Provides a base endpoint for implementing JSON Merge Patch requests.
See RFC 7386 @ https://tools.ietf.org/html/rfc7386
"""
MERGE_CONTENT_TYPE = 'application/merge-patch+json'
_http_method = "PATCH"
def _handle_patch(self, context, patch):
raise NotImplementedError("%s should override _handle_patch" %
self.__class__.__name__)
def _handle(self, context):
resource = getattr(self, "_resource", None)
if not isinstance(resource, Resource):
raise NotImplementedError(
"%s needs to define _resource: Resource which will be patched" %
self.__class__.__name__)
if (context.headers.get('Content-Type').lower() !=
self.MERGE_CONTENT_TYPE):
raise APIError.UnsupportedMedia("PATCH expects content-type %r" %
self.MERGE_CONTENT_TYPE)
try:
patch = ResourcePatch(patch=json.loads(context.body),
resource=resource)
except Exception, exc:
raise APIError.UnprocessableEntity(
"Could not decode JSON from request payload: %s" %
exc)
return self._handle_patch(context, patch)
class PutResourceEndpoint(Endpoint):
"""Provides a base endpoint for implementing JSON PUT resource.
See RFC 7386 @ https://tools.ietf.org/html/rfc7386
"""
MERGE_CONTENT_TYPE = 'application/json'
_http_method = "PUT"
def _handle_put(self, context, patch):
raise NotImplementedError("%s should override _handle_patch" %
self.__class__.__name__)
def _handle(self, context):
resource = getattr(self, "_resource", None)
if not isinstance(resource, Resource):
raise NotImplementedError(
"%s needs to define _resource: Resource which will be patched" %
self.__class__.__name__)
if (context.headers.get('Content-Type').lower() !=
self.MERGE_CONTENT_TYPE):
raise APIError.UnsupportedMedia("PATCH expects content-type %r" %
self.MERGE_CONTENT_TYPE)
try:
patch = ResourcePatch(patch=json.loads(context.body),
resource=resource)
except Exception, exc:
raise APIError.UnprocessableEntity(
"Could not decode JSON from request payload: %s" %
exc)
return self._handle_put(context, patch)
| mit | -4,322,117,158,030,105,000 | 39.454106 | 103 | 0.578656 | false |
code-ape/SocialJusticeDataProcessing | category.py | 1 | 6149 | import tools
import settings
def base_demographic(data, demographic_questions):
breakdowns = {}
for question_num in demographic_questions:
responses = tools.get_responses_to_number(question_num, data)
title = tools.get_question_title(question_num, data)
values = tools.extract_vals_from_responses(responses)[0]
breakdown = create_breakdown(values)
breakdowns[title] = breakdown
return breakdowns
def generate_answer_response_lists(data, opinion_questions):
print("Generating answer response list.")
answer_response_dict = {}
for question_num in opinion_questions:
responses = tools.get_responses_to_number(question_num, data)
values = tools.extract_vals_from_responses(responses, data)[0]
title = tools.get_question_title(question_num, data)
index_breakdown = create_index_breakdown(values)
answer_response_dict[title] = index_breakdown
print("Done generating answer response list.")
return answer_response_dict
def generate_demographic_for_response_lists(answer_response_lists, data):
count = 0
question_dict = {}
for title, response_dict in answer_response_lists.iteritems():
question_num = tools.get_question_num_with_title(title, data)
answer_breakdown_dict = {}
for response_val, response_nums in response_dict.iteritems():
responses = []
for response_num in response_nums:
responses.append(data[response_num])
breakdowns = base_demographic(responses, settings.student_demographic_questions)
count += len(breakdowns)
answer_breakdown_dict[response_val] = breakdowns
question_dict[title] = answer_breakdown_dict
print("generate_demographic_for_response_lists did {} breakdowns.".format(count))
return question_dict
def calc_demographic_diff(base_demographic, opinion_demographic_dict):
opinion_demographic_diff_dict = {}
for question_name, answer_dict in opinion_demographic_dict.iteritems():
answer_diff_dict = {}
for choice, demographic in answer_dict.iteritems():
answer_diff_dict[choice] = create_demographic_diff(base_demographic, demographic)
opinion_demographic_diff_dict[question_name] = answer_diff_dict
return opinion_demographic_diff_dict
def find_interesting_demographic_changes(opinion_demographic_diff_dict):
interesting_demographic_changes = []
threshold = 0.3
counter = 0
for question_name, answer_dict in opinion_demographic_diff_dict.iteritems():
for choice, demographic in answer_dict.iteritems():
for title, breakdown in demographic.iteritems():
for answer, nums in breakdown.iteritems():
percent_shift = nums["percent_shift"]
if percent_shift > 25 or percent_shift < -25:
interesting_demographic_changes.append({
"question": question_name,
"question_choice": choice,
"demographic_title": title,
"demographic_answer": answer,
"percent_shift": percent_shift
})
counter += 1
print("Found {} interesting results".format(counter))
return interesting_demographic_changes
def save_interesting_demographics_changes_to_file(interesting_demographic_changes, path):
print("Saving {} interesting demographic change entries to: {}".format(
len(interesting_demographic_changes), path
))
with open(path, "w") as f:
for entry in interesting_demographic_changes:
f.write("Question: {}\n".format(entry["question"]))
f.write("Choice: {}\n".format(entry["question_choice"]))
f.write("Demographic Category: {}\n".format(entry["demographic_title"]))
f.write("Demographic: {}\n".format(entry["demographic_answer"]))
f.write("Shift: {}\n\n\n".format(entry["percent_shift"]))
print("Done saving entries.")
def print_breakdown(title, breakdown):
print("\n\nBreakdown for {}".format(title))
for val, nums in breakdown.iteritems():
print("{}: {}, {:.1f}%".format(val, nums['number'], nums['percentage']))
def create_breakdown(values):
answer_dict = {}
# really hacky way of handling answers with where multiple
# options could be choosen
for val in values:
choices = None
if not isinstance(val, list):
choices = [val]
else:
choices = val
for choice in choices:
if choice not in answer_dict:
answer_dict[choice] = 0
answer_dict[choice] += 1
breakdown_dict = {}
total_values = float(len(values))
for val, num in answer_dict.iteritems():
breakdown_dict[val] = {"number": num, "percentage": 100*num/total_values}
return breakdown_dict
def create_index_breakdown(values):
breakdown = {}
count = 0
for val in values:
choices = None
if not isinstance(val, list):
choices = [val]
else:
choices = val
for choice in choices:
if choice not in breakdown:
breakdown[choice] = []
breakdown[choice].append(count)
count+=1
return breakdown
def create_demographic_diff(base_demographic, contrast_demographic):
demographic_diff = {}
for title, breakdown in base_demographic.iteritems():
contrast_breakdown = contrast_demographic[title]
breakdown_diff = {}
for answer, nums in breakdown.iteritems():
contrast_nums = None
if answer in contrast_breakdown:
contrast_nums = contrast_breakdown[answer]
else:
contrast_nums = {"percentage": 0}
shift = contrast_nums["percentage"] - nums["percentage"]
breakdown_diff[answer] = {
"percent_shift": shift
}
demographic_diff[title] = breakdown_diff
return demographic_diff
| apache-2.0 | -8,700,736,649,998,507,000 | 37.192547 | 93 | 0.623191 | false |
ASMlover/study | compiler/eLisp/eLisp/expr/definition.py | 1 | 1898 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2015 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from eLisp.model import Symbol
from eLisp.expr.lambdaexpr import make_lambda
from eLisp.expr.util import (
is_tagged_list, is_symbol, cadr, caadr, caddr, cdadr, cddr)
def is_definition(expr):
return is_tagged_list(expr, Symbol('define'))
def definition_variable(expr):
if is_symbol(cadr(expr)):
return cadr(expr)
return caadr(expr)
def definition_value(expr):
if is_symbol(cadr(expr)):
return caddr(expr)
return make_lambda(cdadr(expr), cddr(expr))
| bsd-2-clause | 3,584,847,533,250,403,000 | 37.734694 | 70 | 0.748156 | false |
cklein/wtforms | tests/form.py | 1 | 8085 | from __future__ import unicode_literals
from unittest import TestCase
from wtforms.form import BaseForm, Form
from wtforms.meta import DefaultMeta
from wtforms.fields import StringField, IntegerField
from wtforms.validators import ValidationError
from tests.common import DummyPostData
class BaseFormTest(TestCase):
def get_form(self, **kwargs):
def validate_test(form, field):
if field.data != 'foobar':
raise ValidationError('error')
return BaseForm({'test': StringField(validators=[validate_test])}, **kwargs)
def test_data_proxy(self):
form = self.get_form()
form.process(test='foo')
self.assertEqual(form.data, {'test': 'foo'})
def test_errors_proxy(self):
form = self.get_form()
form.process(test='foobar')
form.validate()
self.assertEqual(form.errors, {})
form = self.get_form()
form.process()
form.validate()
self.assertEqual(form.errors, {'test': ['error']})
def test_contains(self):
form = self.get_form()
self.assertTrue('test' in form)
self.assertTrue('abcd' not in form)
def test_field_removal(self):
form = self.get_form()
del form['test']
self.assertRaises(AttributeError, getattr, form, 'test')
self.assertTrue('test' not in form)
def test_field_adding(self):
form = self.get_form()
self.assertEqual(len(list(form)), 1)
form['foo'] = StringField()
self.assertEqual(len(list(form)), 2)
form.process(DummyPostData(foo=['hello']))
self.assertEqual(form['foo'].data, 'hello')
form['test'] = IntegerField()
self.assertTrue(isinstance(form['test'], IntegerField))
self.assertEqual(len(list(form)), 2)
self.assertRaises(AttributeError, getattr, form['test'], 'data')
form.process(DummyPostData(test=['1']))
self.assertEqual(form['test'].data, 1)
self.assertEqual(form['foo'].data, '')
def test_populate_obj(self):
m = type(str('Model'), (object, ), {})
form = self.get_form()
form.process(test='foobar')
form.populate_obj(m)
self.assertEqual(m.test, 'foobar')
self.assertEqual([k for k in dir(m) if not k.startswith('_')], ['test'])
def test_prefixes(self):
form = self.get_form(prefix='foo')
self.assertEqual(form['test'].name, 'foo-test')
self.assertEqual(form['test'].short_name, 'test')
self.assertEqual(form['test'].id, 'foo-test')
form = self.get_form(prefix='foo.')
form.process(DummyPostData({'foo.test': ['hello'], 'test': ['bye']}))
self.assertEqual(form['test'].data, 'hello')
self.assertEqual(self.get_form(prefix='foo[')['test'].name, 'foo[-test')
def test_formdata_wrapper_error(self):
form = self.get_form()
self.assertRaises(TypeError, form.process, [])
class FormMetaTest(TestCase):
def test_monkeypatch(self):
class F(Form):
a = StringField()
self.assertEqual(F._unbound_fields, None)
F()
self.assertEqual(F._unbound_fields, [('a', F.a)])
F.b = StringField()
self.assertEqual(F._unbound_fields, None)
F()
self.assertEqual(F._unbound_fields, [('a', F.a), ('b', F.b)])
del F.a
self.assertRaises(AttributeError, lambda: F.a)
F()
self.assertEqual(F._unbound_fields, [('b', F.b)])
F._m = StringField()
self.assertEqual(F._unbound_fields, [('b', F.b)])
def test_subclassing(self):
class A(Form):
a = StringField()
c = StringField()
class B(A):
b = StringField()
c = StringField()
A()
B()
self.assertTrue(A.a is B.a)
self.assertTrue(A.c is not B.c)
self.assertEqual(A._unbound_fields, [('a', A.a), ('c', A.c)])
self.assertEqual(B._unbound_fields, [('a', B.a), ('b', B.b), ('c', B.c)])
def test_class_meta_reassign(self):
class MetaA:
pass
class MetaB:
pass
class F(Form):
Meta = MetaA
self.assertEqual(F._wtforms_meta, None)
assert isinstance(F().meta, MetaA)
assert issubclass(F._wtforms_meta, MetaA)
F.Meta = MetaB
self.assertEqual(F._wtforms_meta, None)
assert isinstance(F().meta, MetaB)
assert issubclass(F._wtforms_meta, MetaB)
class FormTest(TestCase):
class F(Form):
test = StringField()
def validate_test(form, field):
if field.data != 'foobar':
raise ValidationError('error')
def test_validate(self):
form = self.F(test='foobar')
self.assertEqual(form.validate(), True)
form = self.F()
self.assertEqual(form.validate(), False)
def test_field_adding_disabled(self):
form = self.F()
self.assertRaises(TypeError, form.__setitem__, 'foo', StringField())
def test_field_removal(self):
form = self.F()
del form.test
self.assertTrue('test' not in form)
self.assertEqual(form.test, None)
self.assertEqual(len(list(form)), 0)
# Try deleting a nonexistent field
self.assertRaises(AttributeError, form.__delattr__, 'fake')
def test_delattr_idempotency(self):
form = self.F()
del form.test
self.assertEqual(form.test, None)
# Make sure deleting a normal attribute works
form.foo = 9
del form.foo
self.assertRaises(AttributeError, form.__delattr__, 'foo')
# Check idempotency
del form.test
self.assertEqual(form.test, None)
def test_ordered_fields(self):
class MyForm(Form):
strawberry = StringField()
banana = StringField()
kiwi = StringField()
self.assertEqual([x.name for x in MyForm()], ['strawberry', 'banana', 'kiwi'])
MyForm.apple = StringField()
self.assertEqual([x.name for x in MyForm()], ['strawberry', 'banana', 'kiwi', 'apple'])
del MyForm.banana
self.assertEqual([x.name for x in MyForm()], ['strawberry', 'kiwi', 'apple'])
MyForm.strawberry = StringField()
self.assertEqual([x.name for x in MyForm()], ['kiwi', 'apple', 'strawberry'])
# Ensure sort is stable: two fields with the same creation counter
# should be subsequently sorted by name.
MyForm.cherry = MyForm.kiwi
self.assertEqual([x.name for x in MyForm()], ['cherry', 'kiwi', 'apple', 'strawberry'])
def test_data_arg(self):
data = {'test': 'foo'}
form = self.F(data=data)
self.assertEqual(form.test.data, 'foo')
form = self.F(data=data, test='bar')
self.assertEqual(form.test.data, 'bar')
def test_empty_formdata(self):
""""If formdata is empty, field.process_formdata should still run to handle empty data."""
self.assertEqual(self.F(DummyPostData({'other': 'other'})).test.data, '')
self.assertEqual(self.F(DummyPostData()).test.data, '')
class MetaTest(TestCase):
class F(Form):
class Meta:
foo = 9
test = StringField()
class G(Form):
class Meta:
foo = 12
bar = 8
class H(F, G):
class Meta:
quux = 42
class I(F, G):
pass
def test_basic(self):
form = self.H()
meta = form.meta
self.assertEqual(meta.foo, 9)
self.assertEqual(meta.bar, 8)
self.assertEqual(meta.csrf, False)
assert isinstance(meta, self.F.Meta)
assert isinstance(meta, self.G.Meta)
self.assertEqual(type(meta).__bases__, (
self.H.Meta,
self.F.Meta,
self.G.Meta,
DefaultMeta
))
def test_missing_diamond(self):
meta = self.I().meta
self.assertEqual(type(meta).__bases__, (
self.F.Meta,
self.G.Meta,
DefaultMeta
))
| bsd-3-clause | 2,748,992,602,378,828,000 | 30.956522 | 98 | 0.574644 | false |
drewverlee/art | tests/test_app_user_interaction.py | 1 | 2114 | #!/usr/local/bin/python
import unittest
import sys
from selenium import webdriver
servers = {
'live' : 'http://safe-sands-8472.herokuapp.com/',
'local': 'http://127.0.0.1:5000/'
}
class TestApp(unittest.TestCase):
BASE = servers['local']
@classmethod
def setUpClass(cls):
cls.c = webdriver.PhantomJS()
def setUp(self):
self.c.get(self.BASE)
    @classmethod
    def tearDownClass(cls):
        cls.c.quit()
def test_app(self):
#test layout
self.assertEqual("DREW'S ART", self.c.find_element_by_class_name('logo').text)
self.assertTrue('about' in self.c.find_element_by_tag_name('nav').text.lower())
self.assertTrue('about' in self.c.find_element_by_id('fnav').text.lower())
self.assertEqual(3, len(self.c.find_elements_by_class_name('cover_art')))
self.assertEqual(4, len(self.c.find_elements_by_tag_name('meta')))
self.assertTrue('NY' in self.c.find_element_by_class_name('copy_right').text)
#test index/home
self.assertEqual('Home', self.c.title)
self.assertEqual(3, len(self.c.find_elements_by_class_name('cover_art')))
# test purchase
self.c.find_element_by_class_name('purchase_link').click()
self.assertEqual('Purchase', self.c.title)
self.assertTrue(self.c.find_element_by_class_name('art'))
self.assertTrue('purchasing' in self.c.find_element_by_class_name('purchase_info').text)
self.assertTrue('small' in self.c.find_element_by_class_name('price').text)
self.assertTrue(self.c.find_element_by_class_name('email-link'))
# test about
self.c.find_element_by_class_name('about_link').click()
self.assertEqual('About', self.c.title)
self.assertTrue(self.c.find_element_by_class_name('picture'))
self.assertTrue('drew' in self.c.find_element_by_class_name('text').text)
self.assertTrue(self.c.find_element_by_class_name('art'))
if __name__ == '__main__':
if len(sys.argv) > 1: TestApp.BASE = servers[sys.argv.pop()]
unittest.main()
| mit | 6,670,995,368,726,679,000 | 31.030303 | 96 | 0.625828 | false |
rlowrance/re-local-linear | transactions-subset2-test.py | 1 | 3987 | # create files
# WORKING/transactions-subset2-train.pickle
# WORKING/transactions-subset2-test.pickle
#
# The test data consists of a 10% random sample of all the data.
#
# Unlike the R version, the data are not stratified by sale month.
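# Output usage sketch (illustrative; the real paths come from directory('working') at runtime):
#   import pandas as pd
#   train = pd.read_pickle('WORKING/transactions-subset2-train.pickle')
#   test = pd.read_pickle('WORKING/transactions-subset2-test.pickle')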
# import built-ins and libraries
import numpy as np
import pandas as pd
import pdb
import sys
from sklearn import cross_validation
# import my stuff
from directory import directory
from Logger import Logger
class Control(object):
def __init__(self):
me = 'transactions-subset2-test'
working = directory('working')
log = directory('log')
base = 'transactions-subset2'
self.path_out_test = working + base + '-test.pickle'
self.path_out_train = working + base + '-train.pickle'
self.path_out_log = log + me + '.log'
self.path_in_data = working + 'transactions-subset2.pickle'
self.test_sample = .10
self.random_seed = 123
self.testing = False
def randomly_split(df, fraction_to_testing, random_state):
    '''Randomly shuffle observations and split them into train and test sets.'''
ss = cross_validation.ShuffleSplit(n=df.shape[0],
n_iter=1,
test_size=fraction_to_testing,
random_state=random_state)
# extract the train and test indices
# there should be exactly one set of such
num_iterations = 0
for train_index, test_index in ss:
num_iterations = num_iterations + 1
train_indices = train_index
test_indices = test_index
assert num_iterations == 1
test = df.iloc[test_indices]
train = df.iloc[train_indices]
return test, train
def analyze(test, train):
print 'test.shape', test.shape
print 'train.shape', train.shape
# check range of sale.year
min_year = min(np.amin(test['sale.year']),
np.amin(train['sale.year']))
max_year = max(np.amax(test['sale.year']),
np.amax(train['sale.year']))
assert min_year == 2003
print max_year
assert max_year == 2009
print 'year, month, #test, #train'
for year in (2003, 2004, 2005, 2006, 2007, 2008, 2009):
last_month_index = 12 if year != 2009 else 3
for month_index in range(last_month_index):
month = month_index + 1
is_month_test = np.logical_and(test['sale.year'] == year,
test['sale.month'] == month)
in_month_test = test[is_month_test]
is_month_train = np.logical_and(train['sale.year'] == year,
train['sale.month'] == month)
in_month_train = train[is_month_train]
print year, month, in_month_test.shape[0], in_month_train.shape[0]
def main():
control = Control()
sys.stdout = Logger(logfile_path=control.path_out_log)
# log the control variables
for k, v in control.__dict__.iteritems():
print 'control', k, v
df = pd.read_pickle(control.path_in_data)
# make sure that sale.python_date is never NaN (null)
if False:
dates = df['sale.python_date']
if dates.isnull().any():
raise ValueError('at least one sale.python_date is null')
if control.testing and False:
pdb.set_trace()
df = df[0:30]
# print columns in df
print 'df.shape', df.shape
for column_name in df.columns:
print 'df column name', column_name
test, train = randomly_split(df=df,
fraction_to_testing=control.test_sample,
random_state=control.random_seed)
analyze(test, train)
test.to_pickle(control.path_out_test)
train.to_pickle(control.path_out_train)
# log the control variables
for k, v in control.__dict__.iteritems():
print 'control', k, v
print 'done'
if __name__ == '__main__':
main()
| mit | -6,138,869,236,464,987,000 | 29.906977 | 78 | 0.589917 | false |
Sult/daf | apps/corporations/models/corporations.py | 1 | 1185 | from django.db import models
#from django.conf import settings
#from config.storage import OverwriteStorage
#from utils.common import icon_size_name
from utils.connection import *
class CorporationApi(models.Model):
""" charactertype apis """
api = models.OneToOneField('apies.Api')
corporationid = models.BigIntegerField()
corporationname = models.CharField(max_length=254)
characterid = models.BigIntegerField()
def __unicode__(self):
return self.corporationname
#class CorporationIcon(models.Model):
#""" images related to characters """
#relation = models.ForeignKey("corporations.Corporation")
#size = models.IntegerField(choices=settings.IMAGE_SIZES)
#typeid = models.IntegerField(unique=True)
#icon = models.ImageField(
#upload_to="images/corporations/",
#storage=OverwriteStorage(),
#blank=True, null=True)
#class Meta:
#unique_together = ["size", "relation"]
#def __unicode__(self):
#return "Corporation Image %s" % icon_size_name(self.size)
##get list of wanted character icon sizes
#@staticmethod
#def icon_sizes():
#return [32, 64, 128, 256]
| mit | -2,615,864,952,555,058,000 | 27.902439 | 66 | 0.681857 | false |
googleapis/python-compute | google/cloud/compute_v1/services/addresses/transports/rest.py | 1 | 20223 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import gapic_v1 # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.auth.transport.requests import AuthorizedSession
from google.cloud.compute_v1.types import compute
from .base import AddressesTransport, DEFAULT_CLIENT_INFO
class AddressesRestTransport(AddressesTransport):
"""REST backend transport for Addresses.
The Addresses API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
"""
def __init__(
self,
*,
host: str = "compute.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._prep_wrapped_messages(client_info)
def aggregated_list(
self,
request: compute.AggregatedListAddressesRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.AddressAggregatedList:
r"""Call the aggregated list method over HTTP.
Args:
request (~.compute.AggregatedListAddressesRequest):
The request object. A request message for
Addresses.AggregatedList. See the method
description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.AddressAggregatedList:
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/aggregated/addresses".format(
host=self._host, project=request.project,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
if compute.AggregatedListAddressesRequest.filter in request:
query_params["filter"] = request.filter
if compute.AggregatedListAddressesRequest.include_all_scopes in request:
query_params["includeAllScopes"] = request.include_all_scopes
if compute.AggregatedListAddressesRequest.max_results in request:
query_params["maxResults"] = request.max_results
if compute.AggregatedListAddressesRequest.order_by in request:
query_params["orderBy"] = request.order_by
if compute.AggregatedListAddressesRequest.page_token in request:
query_params["pageToken"] = request.page_token
if compute.AggregatedListAddressesRequest.return_partial_success in request:
query_params["returnPartialSuccess"] = request.return_partial_success
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.get(url, headers=headers,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.AddressAggregatedList.from_json(
response.content, ignore_unknown_fields=True
)
def delete(
self,
request: compute.DeleteAddressRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the delete method over HTTP.
Args:
request (~.compute.DeleteAddressRequest):
The request object. A request message for
Addresses.Delete. See the method
description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
                -  `Global </compute/docs/reference/rest/{$api_version}/globalOperations>`__
                -  `Regional </compute/docs/reference/rest/{$api_version}/regionOperations>`__
                -  `Zonal </compute/docs/reference/rest/{$api_version}/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses.
Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource.
- For regional operations, use the ``regionOperations``
resource.
- For zonal operations, use the ``zonalOperations``
resource.
For more information, read Global, Regional, and Zonal
Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/regions/{region}/addresses/{address}".format(
host=self._host,
project=request.project,
region=request.region,
address=request.address,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
if compute.DeleteAddressRequest.request_id in request:
query_params["requestId"] = request.request_id
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.delete(url, headers=headers,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.Operation.from_json(response.content, ignore_unknown_fields=True)
def get(
self,
request: compute.GetAddressRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Address:
r"""Call the get method over HTTP.
Args:
request (~.compute.GetAddressRequest):
The request object. A request message for Addresses.Get.
See the method description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Address:
Use global external addresses for GFE-based external
HTTP(S) load balancers in Premium Tier.
Use global internal addresses for reserved peering
network range.
Use regional external addresses for the following
resources:
- External IP addresses for VM instances - Regional
external forwarding rules - Cloud NAT external IP
addresses - GFE based LBs in Standard Tier - Network
LBs in Premium or Standard Tier - Cloud VPN gateways
(both Classic and HA)
Use regional internal IP addresses for subnet IP ranges
(primary and secondary). This includes:
- Internal IP addresses for VM instances - Alias IP
ranges of VM instances (/32 only) - Regional internal
forwarding rules - Internal TCP/UDP load balancer
addresses - Internal HTTP(S) load balancer addresses
- Cloud DNS inbound forwarding IP addresses
For more information, read reserved IP address.
(== resource_for {$api_version}.addresses ==) (==
resource_for {$api_version}.globalAddresses ==)
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/regions/{region}/addresses/{address}".format(
host=self._host,
project=request.project,
region=request.region,
address=request.address,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.get(url, headers=headers,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.Address.from_json(response.content, ignore_unknown_fields=True)
def insert(
self,
request: compute.InsertAddressRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the insert method over HTTP.
Args:
request (~.compute.InsertAddressRequest):
The request object. A request message for
Addresses.Insert. See the method
description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
                -  `Global </compute/docs/reference/rest/{$api_version}/globalOperations>`__
                -  `Regional </compute/docs/reference/rest/{$api_version}/regionOperations>`__
                -  `Zonal </compute/docs/reference/rest/{$api_version}/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses.
Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource.
- For regional operations, use the ``regionOperations``
resource.
- For zonal operations, use the ``zonalOperations``
resource.
For more information, read Global, Regional, and Zonal
Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# Jsonify the request body
body = compute.Address.to_json(
request.address_resource,
including_default_value_fields=False,
use_integers_for_enums=False,
)
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/regions/{region}/addresses".format(
host=self._host, project=request.project, region=request.region,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
if compute.InsertAddressRequest.request_id in request:
query_params["requestId"] = request.request_id
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.post(url, headers=headers, data=body,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.Operation.from_json(response.content, ignore_unknown_fields=True)
def list(
self,
request: compute.ListAddressesRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.AddressList:
r"""Call the list method over HTTP.
Args:
request (~.compute.ListAddressesRequest):
The request object. A request message for Addresses.List.
See the method description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.AddressList:
Contains a list of addresses.
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/regions/{region}/addresses".format(
host=self._host, project=request.project, region=request.region,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
if compute.ListAddressesRequest.filter in request:
query_params["filter"] = request.filter
if compute.ListAddressesRequest.max_results in request:
query_params["maxResults"] = request.max_results
if compute.ListAddressesRequest.order_by in request:
query_params["orderBy"] = request.order_by
if compute.ListAddressesRequest.page_token in request:
query_params["pageToken"] = request.page_token
if compute.ListAddressesRequest.return_partial_success in request:
query_params["returnPartialSuccess"] = request.return_partial_success
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.get(url, headers=headers,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.AddressList.from_json(
response.content, ignore_unknown_fields=True
)
__all__ = ("AddressesRestTransport",)
| apache-2.0 | -8,557,189,965,289,486,000 | 41.307531 | 105 | 0.608911 | false |
ningirsu/stepmania-server | smserver/controllers/legacy/login.py | 1 | 2496 | """ Login controller """
from smserver.smutils.smpacket import smpacket
from smserver.smutils.smpacket import smcommand
from smserver.stepmania_controller import StepmaniaController
from smserver.resources import user_resource
from smserver import models
from smserver import exceptions
from smserver import __version__
class LoginController(StepmaniaController):
""" Controller use to manage SMO LOGIN packet """
command = smcommand.SMOClientCommand.LOGIN
require_login = False
def handle(self):
""" Handle a SMO login packet """
resource = user_resource.UserResource(self.server, self.conn.token, self.session)
if self.server.config.auth["autocreate"]:
login_func = resource.login_or_create
else:
login_func = resource.login
try:
user = login_func(self.packet["username"], self.packet["password"])
except exceptions.Forbidden as err:
self.send(smpacket.SMPacketServerNSSMONL(
packet=smpacket.SMOPacketServerLogin(
approval=1,
text=err.message
)
))
return
try:
resource.connect(user, pos=self.packet["player_number"])
except exceptions.Unauthorized as err:
self.send(smpacket.SMPacketServerNSSMONL(
packet=smpacket.SMOPacketServerLogin(
approval=1,
text=err.message
)
))
return
nb_onlines = models.User.nb_onlines(self.session)
max_users = self.server.config.server.get("max_users", -1)
if not self.users:
self._send_server_resume(nb_onlines, max_users)
self.send(smpacket.SMPacketServerNSSMONL(
packet=smpacket.SMOPacketServerLogin(
approval=0,
text="Player %s successfully login" % self.packet["username"]
)
))
self.send(models.Room.smo_list(self.session, self.active_users))
def _send_server_resume(self, nb_onlines, max_users):
self.send_message(self.server.config.server.get("motd", ""), to="me")
self.send_message(
"SMServer v%s, started on %s. %s/%s users online" % (
__version__,
self.server.started_at.strftime("%x at %X"),
nb_onlines + 1,
max_users if max_users > 0 else "--"
),
to="me")
| mit | 2,024,049,548,031,647,500 | 33.191781 | 89 | 0.588141 | false |
iamshang1/Projects | Advanced_ML/Human_Activity_Recognition/LSTM/record_fetcher_between_subject.py | 1 | 24384 | import numpy as np
import glob
import sys
import random
class record_fetcher(object):
'''
creates feature arrays and labels from raw accelerometer/demographic data
splits features and labels between subjects into test/train sets
methods:
- fetch(batch_size,minibatch_size,binary,seed)
calculates summary statistics from raw accelerometer/demographic data and creates
input features and labels for lstm classifier
parameters:
- batch_size: integer
number of frames to use for each train/test instance
e.g. 1000 means each test/train instance represents 10 seconds of data
- minibatch_size: integer
number of frames to use for each set of summary statistics
e.g. 50 will calculate summary statistics over .5 second windows across each train/test instance
- binary: boolean (default True)
use True to set labels for ambulatory/non-ambulatory
use False to set labels for non-ambulatory/walking/running/upstairs/downstairs
- seed: integer (default None)
(optional) seed to use for random test/train splitting
outputs:
- numpy array representing training summary statistics and demographic data over time
dimension 0 is the index of the batch window
dimension 1 is the index of the minibatch window
dimension 2 is the summary statistics and demographic data over each minibatch window
- numpy array representing testing summary statistics and demographic data over time
dimension 0 is the index of the batch window
dimension 1 is the index of the minibatch window
dimension 2 is the summary statistics and demographic data over each minibatch window
- numpy array representing training activity label over each time window
- numpy array representing testing activity label over each time window
'''
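    # Usage sketch (mirrors the __main__ block at the bottom of this file; the sizes and seed
    # below are illustrative, not required values):
    #   rf = record_fetcher()
    #   X_train, X_test, y_train, y_test = rf.fetch(1000, 50, binary=True, seed=1)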
def __init__(self):
#collect all valid subject ids
self.subjects = [102,103,105,106,107,108,110,112,113,114,115,116,117,118,119,120,\
121,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,139,140,\
142,143,144,146,148,149,150,151,152,153,154,155,156,157,159,160,161,162,163,\
164,165,166,169,170,171,172,173,174,175,177,178,179,180,181,182,183,184,185,\
186,187,188,189,190,191,192]
#categorize activity ids into ambulatory/non-ambulatory
self.dic1 = {
'ambulatory': [11,12,13,14,23,24,25,26,27,28,29,30,31,32,16,17,18,33,34],
'nonambulatory': [19,20,21,22]
}
#categorize activity ids into non-ambulatory/walking/running/upstairs/downstairs
self.dic2 = {
'nonambulatory': [19,20,21,22],
'walking': [11,12,13,14,23,24,25,26,27,28,29,30,31,32],
'running': [16,17,18],
'upstairs': [33],
'downstairs': [34]
}
#get filenames for all activity arrays
self.ambulatory = []
for i in self.dic1['ambulatory']:
self.ambulatory.extend(glob.glob('../data/arrays/*_%i_*' % i))
self.nonambulatory = []
for i in self.dic1['nonambulatory']:
self.nonambulatory.extend(glob.glob('../data/arrays/*_%i_*' % i))
self.walking = []
for i in self.dic2['walking']:
self.walking.extend(glob.glob('../data/arrays/*_%i_*' % i))
self.running = []
for i in self.dic2['running']:
self.running.extend(glob.glob('../data/arrays/*_%i_*' % i))
self.upstairs = []
for i in self.dic2['upstairs']:
self.upstairs.extend(glob.glob('../data/arrays/*_%i_*' % i))
self.downstairs = []
for i in self.dic2['downstairs']:
self.downstairs.extend(glob.glob('../data/arrays/*_%i_*' % i))
def fetch(self,batch_size,minibatch_size,binary=True,seed=None):
'''
calculates summary statistics from raw accelerometer/demographic data and creates
input features and labels for lstm classifier
parameters:
- batch_size: integer
number of frames to use for each train/test instance
e.g. 1000 means each test/train instance represents 10 seconds of data
- minibatch_size: integer
number of frames to use for each set of summary statistics
e.g. 50 will calculate summary statistics over .5 second windows across each train/test instance
- binary: boolean (default True)
use True to set labels for ambulatory/non-ambulatory
use False to set labels for non-ambulatory/walking/running/upstairs/downstairs
- seed: integer (default None)
(optional) seed to use for random test/train splitting
outputs:
- numpy array representing training summary statistics and demographic data over time
dimension 0 is the index of the batch window
dimension 1 is the index of the minibatch window
                dimension 2 is the summary statistics and demographic data over each minibatch window
- numpy array representing testing summary statistics and demographic data over time
dimension 0 is the index of the batch window
dimension 1 is the index of the minibatch window
                dimension 2 is the summary statistics and demographic data over each minibatch window
- numpy array representing training activity label over each time window
- numpy array representing testing activity label over each time window
'''
#reserve subset of subject ids as test set
np.random.seed(seed)
X_test_subjects = np.random.choice(self.subjects,6)
X_train_list = []
y_train_list = []
X_test_list = []
y_test_list = []
batches = batch_size//minibatch_size
#for ambulatory/non-ambulatory classification
if binary:
for a in self.ambulatory:
print 'processing %s' % a
array = np.load(a)
#avoid arrays smaller than batch_size
if array.shape[0] <= batch_size:
continue
#separate array into batches
seg = array.shape[0]//batch_size
for i in range(seg):
batch = np.empty((0,109))
                    #separate batches into minibatches and calculate summary statistics per minibatch
for j in range(batches):
subarray = array[i*batch_size+j*minibatch_size:(i+1)*batch_size+(j+1)*minibatch_size,:]
features = self._create_features(subarray)
batch = np.concatenate((batch,features),0)
#if subject id in test set, save features and labels to test set
if int(a[15:18]) in X_test_subjects:
X_test_list.append(batch)
y_test_list.append(np.array([0,1]))
#else save features and labels to train set
else:
X_train_list.append(batch)
y_train_list.append(np.array([0,1]))
for a in self.nonambulatory:
print 'processing %s' % a
array = np.load(a)
#avoid arrays smaller than batch_size
if array.shape[0] <= batch_size:
continue
#separate array into batches
seg = array.shape[0]//batch_size
for i in range(seg):
batch = np.empty((0,109))
                    #separate batches into minibatches and calculate summary statistics per minibatch
for j in range(batches):
subarray = array[i*batch_size+j*minibatch_size:(i+1)*batch_size+(j+1)*minibatch_size,:]
features = self._create_features(subarray)
batch = np.concatenate((batch,features),0)
#if subject id in test set, save features and labels to test set
if int(a[15:18]) in X_test_subjects:
X_test_list.append(batch)
y_test_list.append(np.array([1,0]))
#else save features and labels to train set
else:
X_train_list.append(batch)
y_train_list.append(np.array([1,0]))
#for non-ambulatory/walking/running/upstairs/downstairs classification
else:
for a in self.nonambulatory:
print 'processing %s' % a
array = np.load(a)
#avoid arrays smaller than batch_size
if array.shape[0] <= batch_size:
continue
#separate array into batches
seg = array.shape[0]//batch_size
for i in range(seg):
batch = np.empty((0,109))
                    #separate batches into minibatches and calculate summary statistics per minibatch
for j in range(batches):
subarray = array[i*batch_size+j*minibatch_size:(i+1)*batch_size+(j+1)*minibatch_size,:]
features = self._create_features(subarray)
batch = np.concatenate((batch,features),0)
#if subject id in test set, save features and labels to test set
if int(a[15:18]) in X_test_subjects:
X_test_list.append(batch)
y_test_list.append(np.array([1,0,0,0,0]))
#else save features and labels to train set
else:
X_train_list.append(batch)
y_train_list.append(np.array([1,0,0,0,0]))
for a in self.walking:
print 'processing %s' % a
array = np.load(a)
#avoid arrays smaller than batch_size
if array.shape[0] <= batch_size:
continue
#separate array into batches
seg = array.shape[0]//batch_size
for i in range(seg):
batch = np.empty((0,109))
                    #separate batches into minibatches and calculate summary statistics per minibatch
for j in range(batches):
subarray = array[i*batch_size+j*minibatch_size:(i+1)*batch_size+(j+1)*minibatch_size,:]
features = self._create_features(subarray)
batch = np.concatenate((batch,features),0)
#if subject id in test set, save features and labels to test set
if int(a[15:18]) in X_test_subjects:
X_test_list.append(batch)
y_test_list.append(np.array([0,1,0,0,0]))
#else save features and labels to train set
else:
X_train_list.append(batch)
y_train_list.append(np.array([0,1,0,0,0]))
for a in self.running:
print 'processing %s' % a
array = np.load(a)
#avoid arrays smaller than batch_size
if array.shape[0] <= batch_size:
continue
#separate array into batches
seg = array.shape[0]//batch_size
for i in range(seg):
batch = np.empty((0,109))
                    #separate batches into minibatches and calculate summary statistics per minibatch
for j in range(batches):
subarray = array[i*batch_size+j*minibatch_size:(i+1)*batch_size+(j+1)*minibatch_size,:]
features = self._create_features(subarray)
batch = np.concatenate((batch,features),0)
#if subject id in test set, save features and labels to test set
if int(a[15:18]) in X_test_subjects:
X_test_list.append(batch)
y_test_list.append(np.array([0,0,1,0,0]))
#else save features and labels to train set
else:
X_train_list.append(batch)
y_train_list.append(np.array([0,0,1,0,0]))
for a in self.upstairs:
print 'processing %s' % a
array = np.load(a)
#avoid arrays smaller than batch_size
if array.shape[0] <= batch_size:
continue
#separate array into batches
seg = array.shape[0]//batch_size
for i in range(seg):
batch = np.empty((0,109))
                    #separate batches into minibatches and calculate summary statistics per minibatch
for j in range(batches):
subarray = array[i*batch_size+j*minibatch_size:(i+1)*batch_size+(j+1)*minibatch_size,:]
features = self._create_features(subarray)
batch = np.concatenate((batch,features),0)
#if subject id in test set, save features and labels to test set
if int(a[15:18]) in X_test_subjects:
X_test_list.append(batch)
y_test_list.append(np.array([0,0,0,1,0]))
#else save features and labels to train set
else:
X_train_list.append(batch)
y_train_list.append(np.array([0,0,0,1,0]))
for a in self.downstairs:
print 'processing %s' % a
array = np.load(a)
#avoid arrays smaller than batch_size
if array.shape[0] <= batch_size:
continue
#separate array into batches
seg = array.shape[0]//batch_size
for i in range(seg):
batch = np.empty((0,109))
                    #separate batches into minibatches and calculate summary statistics per minibatch
for j in range(batches):
subarray = array[i*batch_size+j*minibatch_size:(i+1)*batch_size+(j+1)*minibatch_size,:]
features = self._create_features(subarray)
batch = np.concatenate((batch,features),0)
#if subject id in test set, save features and labels to test set
if int(a[15:18]) in X_test_subjects:
X_test_list.append(batch)
y_test_list.append(np.array([0,0,0,0,1]))
#else save features and labels to train set
else:
X_train_list.append(batch)
y_train_list.append(np.array([0,0,0,0,1]))
#pair training X/y together and shuffle
print 'shuffling records'
Xy = zip(X_train_list,y_train_list)
random.shuffle(Xy)
#separate training X from y
X_train = np.array([record[0] for record in Xy])
y_train = np.array([record[1] for record in Xy])
print 'feature vector shape:', X_train.shape
print 'label vector shape:', y_train.shape
#pair testing X/y together and shuffle
Xy = zip(X_test_list,y_test_list)
random.shuffle(Xy)
#separate testing X from y
X_test = np.array([record[0] for record in Xy])
y_test = np.array([record[1] for record in Xy])
print 'feature vector shape:', X_test.shape
print 'label vector shape:', y_test.shape
return X_train, X_test, y_train, y_test
def _create_features(self,array):
'''
calculate summary statistics over time window
concatenate with normalized demographic data
the following features are calculated for each axis (X,Y,Z),
magnitude (sqrt of X^2+Y^2+Z^2), first differential of each axis,
and first differential of magnitude:
- mean, std, min, max
- 10,25,50,75,90 percentiles
- number of median crossings
- correlation with other axis
'''
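        # Note: each returned row is expected to hold 109 columns in total (the accelerometer
        # summary statistics computed below plus the demographic fields from array[0, 3:]),
        # matching the np.empty((0,109)) batch buffers allocated in fetch().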
#create features
mag = np.sqrt(array[:,0]**2+array[:,1]**2+array[:,2]**2)
x_mean = np.mean(array[:,0])
y_mean = np.mean(array[:,1])
z_mean = np.mean(array[:,2])
mag_mean = np.mean(mag)
x_std = np.std(array[:,0])
y_std = np.std(array[:,1])
z_std = np.std(array[:,2])
mag_std = np.std(mag)
x_10per = np.percentile(array[:,0],10)
x_25per = np.percentile(array[:,0],25)
x_50per = np.percentile(array[:,0],50)
x_75per = np.percentile(array[:,0],75)
x_90per = np.percentile(array[:,0],90)
x_med = np.median(array[:,0])
x_medcross = np.sum(np.diff((array[:,0]==x_med).astype(int))==1)
x_max = np.amax(array[:,0])
x_min = np.amin(array[:,0])
x_range = x_max - x_min
x_iqrange = x_75per - x_25per
y_10per = np.percentile(array[:,1],10)
y_25per = np.percentile(array[:,1],25)
y_50per = np.percentile(array[:,1],50)
y_75per = np.percentile(array[:,1],75)
y_90per = np.percentile(array[:,1],90)
y_med = np.median(array[:,1])
y_medcross = np.sum(np.diff((array[:,1]==y_med).astype(int))==1)
y_max = np.amax(array[:,1])
y_min = np.amin(array[:,1])
y_range = y_max - y_min
y_iqrange = y_75per - y_25per
z_10per = np.percentile(array[:,2],10)
z_25per = np.percentile(array[:,2],25)
z_50per = np.percentile(array[:,2],50)
z_75per = np.percentile(array[:,2],75)
z_90per = np.percentile(array[:,2],90)
z_med = np.median(array[:,2])
z_medcross = np.sum(np.diff((array[:,2]==z_med).astype(int))==1)
z_max = np.amax(array[:,2])
z_min = np.amin(array[:,2])
z_range = z_max - z_min
z_iqrange = z_75per - z_25per
mag_10per = np.percentile(mag,10)
mag_25per = np.percentile(mag,25)
mag_50per = np.percentile(mag,50)
mag_75per = np.percentile(mag,75)
mag_90per = np.percentile(mag,90)
mag_med = np.median(mag)
mag_medcross = np.sum(np.diff((mag==mag_med).astype(int))==1)
mag_max = np.amax(mag)
mag_min = np.amin(mag)
mag_range = mag_max - mag_min
mag_iqrange = mag_75per - mag_25per
xy_corr = np.correlate(array[:,0],array[:,1])
xz_corr = np.correlate(array[:,0],array[:,2])
yz_corr = np.correlate(array[:,1],array[:,2])
x_d1 = np.diff(array[:,0])
y_d1 = np.diff(array[:,1])
z_d1 = np.diff(array[:,2])
mag_d1 = np.diff(mag)
x_d1_mean = np.mean(x_d1)
y_d1_mean = np.mean(y_d1)
z_d1_mean = np.mean(z_d1)
mag_mean_d1 = np.mean(mag_d1)
x_d1_std = np.std(x_d1)
y_d1_std = np.std(y_d1)
z_d1_std = np.std(z_d1)
mag_std_d1 = np.std(mag_d1)
x_10per_d1 = np.percentile(x_d1,10)
x_25per_d1 = np.percentile(x_d1,25)
x_50per_d1 = np.percentile(x_d1,50)
x_75per_d1 = np.percentile(x_d1,75)
x_90per_d1 = np.percentile(x_d1,90)
x_med_d1 = np.median(x_d1)
x_medcross_d1 = np.sum(np.diff((x_d1==x_med_d1).astype(int))==1)
x_max_d1 = np.amax(x_d1)
x_min_d1 = np.amin(x_d1)
x_range_d1 = x_max_d1 - x_min_d1
x_iqrange_d1 = x_75per_d1 - x_25per_d1
y_10per_d1 = np.percentile(y_d1,10)
y_25per_d1 = np.percentile(y_d1,25)
y_50per_d1 = np.percentile(y_d1,50)
y_75per_d1 = np.percentile(y_d1,75)
y_90per_d1 = np.percentile(y_d1,90)
y_med_d1 = np.median(y_d1)
y_medcross_d1 = np.sum(np.diff((y_d1==y_med_d1).astype(int))==1)
y_max_d1 = np.amax(y_d1)
y_min_d1 = np.amin(y_d1)
y_range_d1 = y_max_d1 - y_min_d1
y_iqrange_d1 = y_75per_d1 - y_25per_d1
z_10per_d1 = np.percentile(z_d1,10)
z_25per_d1 = np.percentile(z_d1,25)
z_50per_d1 = np.percentile(z_d1,50)
z_75per_d1 = np.percentile(z_d1,75)
z_90per_d1 = np.percentile(z_d1,90)
z_med_d1 = np.median(z_d1)
z_medcross_d1 = np.sum(np.diff((z_d1==z_med_d1).astype(int))==1)
z_max_d1 = np.amax(z_d1)
z_min_d1 = np.amin(z_d1)
z_range_d1 = z_max_d1 - z_min_d1
z_iqrange_d1 = z_75per_d1 - z_25per_d1
mag_10per_d1 = np.percentile(mag_d1,10)
mag_25per_d1 = np.percentile(mag_d1,25)
mag_50per_d1 = np.percentile(mag_d1,50)
mag_75per_d1 = np.percentile(mag_d1,75)
mag_90per_d1 = np.percentile(mag_d1,90)
mag_med_d1 = np.median(mag_d1)
mag_medcross_d1 = np.sum(np.diff((mag_d1==mag_med_d1).astype(int))==1)
mag_max_d1 = np.amax(mag_d1)
mag_min_d1 = np.amin(mag_d1)
mag_range_d1 = mag_max_d1 - mag_min_d1
mag_iqrange_d1 = mag_75per_d1 - mag_25per_d1
xy_corr_d1 = np.correlate(x_d1,y_d1)
xz_corr_d1 = np.correlate(x_d1,z_d1)
yz_corr_d1 = np.correlate(y_d1,z_d1)
#concatenate all features
        features = np.array([x_mean,y_mean,z_mean,x_std,y_std,z_std,xy_corr,xz_corr,yz_corr,\
x_10per,x_25per,x_50per,x_75per,x_90per,x_max,x_min,x_medcross,x_range,x_iqrange,\
y_10per,y_25per,y_50per,y_75per,y_90per,y_max,y_min,y_medcross,y_range,y_iqrange,\
z_10per,z_25per,z_50per,z_75per,z_90per,z_max,z_min,z_medcross,z_range,z_iqrange,\
mag_mean,mag_std,mag_10per,mag_25per,mag_50per,mag_75per,mag_90per,mag_max,mag_min,mag_medcross,mag_range,mag_iqrange,\
x_d1_mean,y_d1_mean,z_d1_mean,x_d1_std,y_d1_std,z_d1_std,xy_corr_d1,xz_corr_d1,yz_corr_d1,\
x_10per_d1,x_25per_d1,x_50per_d1,x_75per_d1,x_90per_d1,x_max_d1,x_min_d1,x_medcross_d1,x_range_d1,x_iqrange_d1,\
y_10per_d1,y_25per_d1,y_50per_d1,y_75per_d1,y_90per_d1,y_max_d1,y_min_d1,y_medcross_d1,y_range_d1,y_iqrange_d1,\
z_10per_d1,z_25per_d1,z_50per_d1,z_75per_d1,z_90per_d1,z_max_d1,z_min_d1,z_medcross_d1,z_range_d1,z_iqrange_d1,\
mag_mean_d1,mag_std_d1,mag_10per_d1,mag_25per_d1,mag_50per_d1,mag_75per_d1,mag_90per_d1,mag_max_d1,mag_min_d1,mag_medcross_d1,mag_range_d1,mag_iqrange_d1])
features = np.concatenate((features,array[0,3:]))
features = np.expand_dims(features, axis=0)
return features
if __name__ == "__main__":
# verify the required arguments are given
if (len(sys.argv) < 2):
print 'Usage: python record_fetcher_between_subject.py <1 for 2-category labels, 0 for 5-category labels>'
exit(1)
if sys.argv[1] == '1':
binary = True
elif sys.argv[1] == '0':
binary = False
else:
print 'Usage: python record_fetcher_between_subject.py <1 for 2-category labels, 0 for 5-category labels>'
exit(1)
rf = record_fetcher()
X_train,X_test,y_train,y_test = rf.fetch(1000,50,binary=binary,seed=1)
np.save('X_train',X_train)
np.save('X_test',X_test)
np.save('y_train',y_train)
np.save('y_test',y_test) | mit | 2,176,022,732,392,023,600 | 45.096408 | 167 | 0.530635 | false |
reaperhulk/pynacl | src/nacl/__init__.py | 1 | 1499 | # Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import sys
import warnings
__all__ = [
"__title__",
"__summary__",
"__uri__",
"__version__",
"__author__",
"__email__",
"__license__",
"__copyright__",
]
__title__ = "PyNaCl"
__summary__ = (
"Python binding to the Networking and Cryptography (NaCl) " "library"
)
__uri__ = "https://github.com/pyca/pynacl/"
__version__ = "1.5.0.dev1"
__author__ = "The PyNaCl developers"
__email__ = "[email protected]"
__license__ = "Apache License 2.0"
__copyright__ = "Copyright 2013-2018 {0}".format(__author__)
if sys.version_info[0] == 2:
warnings.warn(
"Python 2 is no longer supported by the Python core team. Support for "
"it is now deprecated in PyNaCl, and will be removed in the "
"next release.",
DeprecationWarning,
stacklevel=2,
)
| apache-2.0 | 646,718,397,534,372,700 | 27.283019 | 79 | 0.661107 | false |
meteorfox/PerfKitBenchmarker | perfkitbenchmarker/linux_packages/openblas.py | 1 | 1365 | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing OpenBLAS installation and cleanup functions."""
from perfkitbenchmarker.linux_packages import INSTALL_DIR
OPENBLAS_DIR = '%s/OpenBLAS' % INSTALL_DIR
GIT_REPO = 'https://github.com/xianyi/OpenBLAS'
GIT_TAG = 'v0.2.15'
def _Install(vm):
"""Installs the OpenBLAS package on the VM."""
vm.Install('build_tools')
vm.Install('fortran')
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, OPENBLAS_DIR))
vm.RemoteCommand('cd {0} && git checkout {1}'.format(OPENBLAS_DIR, GIT_TAG))
vm.RemoteCommand('cd {0} && make USE_THREAD=0'.format(OPENBLAS_DIR))
def YumInstall(vm):
"""Installs the OpenBLAS package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the OpenBLAS package on the VM."""
_Install(vm)
| apache-2.0 | 6,253,825,726,855,896,000 | 33.125 | 78 | 0.731136 | false |
dmarley/tfrs | server/models/User.py | 1 | 1571 | """
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
from django.db import models
from django.utils import timezone
from .FuelSupplier import FuelSupplier
class User(models.Model):
givenName = models.CharField(max_length=255)
surname = models.CharField(max_length=255)
initials = models.CharField(max_length=255)
email = models.CharField(max_length=255)
status = models.CharField(max_length=255)
fuelSupplier = models.ForeignKey('FuelSupplier', on_delete=models.CASCADE,related_name='UserfuelSupplier')
smUserId = models.CharField(max_length=255)
guid = models.CharField(max_length=255)
smAuthorizationDirectory = models.CharField(max_length=255)
| apache-2.0 | -2,166,037,394,678,073,900 | 40.342105 | 208 | 0.741566 | false |
designcc/django-ccpages | ccpages/tests/test_listeners.py | 1 | 3074 | import os
from unittest import skipUnless
from decimal import Decimal
from django.test import TestCase
from django.conf import settings
from django.core.files import File
from ccpages.forms import PagePasswordForm
from ccpages.models import Page, PageAttachment
class ListenerTestCases(TestCase):
@skipUnless(os.path.exists('%s/ccpages/test.pdf' % settings.STATIC_ROOT),
'test.pdf file does not exist')
def test_title(self):
"""A title is set on a file from filename is none is supplied"""
# open file
test_pdf = open('%s/ccpages/test.pdf' % settings.STATIC_ROOT)
# make page and attachment
p1 = Page()
p1.title = '1'
p1.slug = '1'
p1.content = '# Hello World'
p1.order = Decimal('1')
p1.password = 'ha'
p1.status = Page.VISIBLE
p1.save()
at1 = PageAttachment()
at1.page = p1
at1.src = File(test_pdf, 'ccpages/test.pdf')
at1.save()
# the title is 'test.pdf'
self.assertEqual(at1.title, 'test.pdf')
test_pdf.close()
os.unlink(at1.src.path)
# make another one, but this time with a title
test_pdf = open('%s/ccpages/test.pdf' % settings.STATIC_ROOT)
at2 = PageAttachment()
at2.page = p1
at2.src = File(test_pdf, 'ccpages/test.pdf')
at2.title = 'Arther'
at2.save()
# title is now arther
self.assertEqual(at2.title, 'Arther')
# delete the files
test_pdf.close()
os.unlink(at2.src.path)
def test_content_rendered(self):
"""When a page is saved the content is passed through
markdown and saved as content_rendered"""
page1 = Page()
page1.title = '1'
page1.slug = '1'
page1.content = '# Hello World'
page1.order = Decimal('1')
page1.password = 'ha'
page1.status = Page.VISIBLE
page1.save()
# we now have rendered content
self.assertHTMLEqual(
page1.content_rendered,
'<h1 id="hello-world">\nHello World\n</h1>')
def test_hash_if_password(self):
"""A hash is generated on save if page has password"""
page1 = Page()
page1.title = '1'
page1.slug = '1'
page1.content = '1'
page1.order = Decimal('1')
page1.password = 'ha'
page1.status = Page.VISIBLE
page1.save()
# get the page
p = Page.objects.get(pk=page1.pk)
# we have a hash
self.assertEqual(
p.hash,
'f9fc27b9374ad1e3bf34fdbcec3a4fd632427fed')
def test_hash_if_no_password(self):
"""A hash is not generated on save if page has no password"""
page1 = Page()
page1.title = '1'
page1.slug = '1'
page1.content = '1'
page1.order = Decimal('1')
page1.status = Page.VISIBLE
page1.save()
# get the page
p = Page.objects.get(pk=page1.pk)
# we have no hash
self.assertFalse(p.hash)
| bsd-3-clause | 8,473,854,025,559,766,000 | 32.053763 | 77 | 0.571893 | false |
repotvsupertuga/tvsupertuga.repository | script.module.openscrapers/lib/openscrapers/sources_openscrapers/de/filmpalast.py | 1 | 6036 | # -*- coding: UTF-8 -*-
# ..#######.########.#######.##....#..######..######.########....###...########.#######.########..######.
# .##.....#.##.....#.##......###...#.##....#.##....#.##.....#...##.##..##.....#.##......##.....#.##....##
# .##.....#.##.....#.##......####..#.##......##......##.....#..##...##.##.....#.##......##.....#.##......
# .##.....#.########.######..##.##.#..######.##......########.##.....#.########.######..########..######.
# .##.....#.##.......##......##..###.......#.##......##...##..########.##.......##......##...##........##
# .##.....#.##.......##......##...##.##....#.##....#.##....##.##.....#.##.......##......##....##.##....##
# ..#######.##.......#######.##....#..######..######.##.....#.##.....#.##.......#######.##.....#..######.
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import json
import re
import urllib
import urlparse
from openscrapers.modules import cleantitle
from openscrapers.modules import client
from openscrapers.modules import dom_parser
from openscrapers.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['filmpalast.to']
self.base_link = 'http://filmpalast.to'
self.search_link = '/search/title/%s'
self.stream_link = 'stream/%s/1'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases))
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases))
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'localtvshowtitle': localtvshowtitle,
'aliases': aliases, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['localtvshowtitle']
title += ' S%02dE%02d' % (int(season), int(episode))
aliases = source_utils.aliases_to_array(eval(data['aliases']))
aliases = [i + ' S%02dE%02d' % (int(season), int(episode)) for i in aliases]
url = self.__search([title] + aliases)
if not url and data['tvshowtitle'] != data['localtvshowtitle']:
title = data['tvshowtitle']
title += ' S%02dE%02d' % (int(season), int(episode))
url = self.__search([title] + aliases)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
query = urlparse.urljoin(self.base_link, url)
r = client.request(query)
quality = dom_parser.parse_dom(r, 'span', attrs={'id': 'release_text'})[0].content.split(' ')[0]
quality, info = source_utils.get_release_quality(quality)
r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'currentStreamLinks'})
r = [(dom_parser.parse_dom(i, 'p', attrs={'class': 'hostName'}),
dom_parser.parse_dom(i, 'a', attrs={'class': 'stream-src'}, req='data-id')) for i in r]
r = [(re.sub(' hd$', '', i[0][0].content.lower()), [x.attrs['data-id'] for x in i[1]]) for i in r if
i[0] and i[1]]
for hoster, id in r:
valid, hoster = source_utils.is_host_valid(hoster, hostDict)
if not valid: continue
sources.append({'source': hoster, 'quality': quality, 'language': 'de',
'info': ' | '.join(info + ['' if len(id) == 1 else 'multi-part']), 'url': id,
'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
try:
h_url = []
for id in url:
query = urlparse.urljoin(self.base_link, self.stream_link % id)
r = client.request(query, XHR=True, post=urllib.urlencode({'streamID': id}))
r = json.loads(r)
if 'error' in r and r['error'] == '0' and 'url' in r:
h_url.append(r['url'])
h_url = h_url[0] if len(h_url) == 1 else 'stack://' + ' , '.join(h_url)
return h_url
except:
return
def __search(self, titles):
try:
query = self.search_link % (urllib.quote_plus(titles[0]))
query = urlparse.urljoin(self.base_link, query)
t = [cleantitle.get(i) for i in set(titles) if i]
r = client.request(query)
r = dom_parser.parse_dom(r, 'article')
r = dom_parser.parse_dom(r, 'a', attrs={'class': 'rb'}, req='href')
r = [(i.attrs['href'], i.content) for i in r]
r = [i[0] for i in r if cleantitle.get(i[1]) in t][0]
return source_utils.strip_domain(r)
except:
return
| gpl-2.0 | 9,050,439,875,432,628,000 | 40.061224 | 117 | 0.444997 | false |
victorhahncastell/deepdiff | deepdiff/contenthash.py | 1 | 8366 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import sys
from collections import Iterable
from collections import MutableMapping
from collections import defaultdict
from decimal import Decimal
from hashlib import sha1
import logging
from deepdiff.helper import py3, int, strings, numbers, items
logger = logging.getLogger(__name__)
class Skipped(object):
def __repr__(self):
return "Skipped" # pragma: no cover
def __str__(self):
return "Skipped" # pragma: no cover
class Unprocessed(object):
def __repr__(self):
return "Error: Unprocessed" # pragma: no cover
def __str__(self):
return "Error: Unprocessed" # pragma: no cover
class NotHashed(object):
def __repr__(self):
return "Error: NotHashed" # pragma: no cover
def __str__(self):
return "Error: NotHashed" # pragma: no cover
class DeepHash(dict):
r"""
**DeepHash**
"""
def __init__(self,
obj,
hashes=None,
exclude_types=set(),
hasher=hash,
ignore_repetition=True,
significant_digits=None,
**kwargs):
if kwargs:
raise ValueError(
("The following parameter(s) are not valid: %s\n"
"The valid parameters are obj, hashes, exclude_types."
"hasher and ignore_repetition.") % ', '.join(kwargs.keys()))
self.obj = obj
self.exclude_types = set(exclude_types)
self.exclude_types_tuple = tuple(
exclude_types) # we need tuple for checking isinstance
self.ignore_repetition = ignore_repetition
self.hasher = hasher
hashes = hashes if hashes else {}
self.update(hashes)
self['unprocessed'] = []
self.unprocessed = Unprocessed()
self.skipped = Skipped()
self.not_hashed = NotHashed()
self.significant_digits = significant_digits
self.__hash(obj, parents_ids=frozenset({id(obj)}))
if self['unprocessed']:
logger.warning("Can not hash the following items: {}.".format(self['unprocessed']))
else:
del self['unprocessed']
@staticmethod
def sha1hex(obj):
"""Use Sha1 for more accuracy."""
if py3: # pragma: no cover
if isinstance(obj, str):
obj = "{}:{}".format(type(obj).__name__, obj)
obj = obj.encode('utf-8')
elif isinstance(obj, bytes):
obj = type(obj).__name__.encode('utf-8') + b":" + obj
else: # pragma: no cover
if isinstance(obj, unicode):
obj = u"{}:{}".format(type(obj).__name__, obj)
obj = obj.encode('utf-8')
elif isinstance(obj, str):
obj = type(obj).__name__ + ":" + obj
return sha1(obj).hexdigest()
@staticmethod
def __add_to_frozen_set(parents_ids, item_id):
parents_ids = set(parents_ids)
parents_ids.add(item_id)
return frozenset(parents_ids)
def __get_and_set_str_hash(self, obj):
obj_id = id(obj)
result = self.hasher(obj)
result = "str:{}".format(result)
self[obj_id] = result
return result
def __hash_obj(self, obj, parents_ids=frozenset({}), is_namedtuple=False):
"""Difference of 2 objects"""
try:
if is_namedtuple:
obj = obj._asdict()
else:
obj = obj.__dict__
except AttributeError:
try:
obj = {i: getattr(obj, i) for i in obj.__slots__}
except AttributeError:
self['unprocessed'].append(obj)
return self.unprocessed
result = self.__hash_dict(obj, parents_ids)
result = "nt{}".format(result) if is_namedtuple else "obj{}".format(
result)
return result
def __skip_this(self, obj):
skip = False
if isinstance(obj, self.exclude_types_tuple):
skip = True
return skip
def __hash_dict(self, obj, parents_ids=frozenset({})):
result = []
obj_keys = set(obj.keys())
for key in obj_keys:
key_hash = self.__hash(key)
item = obj[key]
item_id = id(item)
if parents_ids and item_id in parents_ids:
continue
parents_ids_added = self.__add_to_frozen_set(parents_ids, item_id)
hashed = self.__hash(item, parents_ids_added)
hashed = "{}:{}".format(key_hash, hashed)
result.append(hashed)
result.sort()
result = ';'.join(result)
result = "dict:{%s}" % result
return result
def __hash_set(self, obj):
return "set:{}".format(self.__hash_iterable(obj))
def __hash_iterable(self, obj, parents_ids=frozenset({})):
result = defaultdict(int)
for i, x in enumerate(obj):
if self.__skip_this(x):
continue
item_id = id(x)
if parents_ids and item_id in parents_ids:
continue
parents_ids_added = self.__add_to_frozen_set(parents_ids, item_id)
hashed = self.__hash(x, parents_ids_added)
result[hashed] += 1
if self.ignore_repetition:
result = list(result.keys())
else:
result = [
'{}|{}'.format(i[0], i[1]) for i in getattr(result, items)()
]
result.sort()
result = ','.join(result)
result = "{}:{}".format(type(obj).__name__, result)
return result
def __hash_str(self, obj):
return self.__get_and_set_str_hash(obj)
def __hash_number(self, obj):
# Based on diff.DeepDiff.__diff_numbers
if self.significant_digits is not None and isinstance(obj, (
float, complex, Decimal)):
obj_s = ("{:.%sf}" % self.significant_digits).format(obj)
# Special case for 0: "-0.00" should compare equal to "0.00"
if set(obj_s) <= set("-0."):
obj_s = "0.00"
result = "number:{}".format(obj_s)
obj_id = id(obj)
self[obj_id] = result
else:
result = "{}:{}".format(type(obj).__name__, obj)
return result
def __hash_tuple(self, obj, parents_ids):
# Checking to see if it has _fields. Which probably means it is a named
# tuple.
try:
obj._asdict
# It must be a normal tuple
except AttributeError:
result = self.__hash_iterable(obj, parents_ids)
# We assume it is a namedtuple then
else:
result = self.__hash_obj(obj, parents_ids, is_namedtuple=True)
return result
def __hash(self, obj, parent="root", parents_ids=frozenset({})):
"""The main diff method"""
obj_id = id(obj)
if obj_id in self:
return self[obj_id]
result = self.not_hashed
if self.__skip_this(obj):
result = self.skipped
elif obj is None:
result = 'NONE'
elif isinstance(obj, strings):
result = self.__hash_str(obj)
elif isinstance(obj, numbers):
result = self.__hash_number(obj)
elif isinstance(obj, MutableMapping):
result = self.__hash_dict(obj, parents_ids)
elif isinstance(obj, tuple):
result = self.__hash_tuple(obj, parents_ids)
elif isinstance(obj, (set, frozenset)):
result = self.__hash_set(obj)
elif isinstance(obj, Iterable):
result = self.__hash_iterable(obj, parents_ids)
else:
result = self.__hash_obj(obj, parents_ids)
if result != self.not_hashed and obj_id not in self and not isinstance(
obj, numbers):
self[obj_id] = result
if result is self.not_hashed: # pragma: no cover
self[obj_id] = self.not_hashed
self['unprocessed'].append(obj)
return result
if __name__ == "__main__": # pragma: no cover
if not py3:
sys.exit("Please run with Python 3 to verify the doc strings.")
import doctest
doctest.testmod()
| mit | 5,563,972,840,234,503,000 | 29.421818 | 95 | 0.536338 | false |
mrsamuelbarnes/Blockchain-Ballot | crypto.py | 1 | 6940 | # Import required modules
import sys
import random
# Import custom modules
import file
# Definition for a public key
class PublicKey:
def __init__(self, n, g):
# 'n' is a product of the two primes chosen for the key
self.n = n
# 'g' is the public exponent used to encrypt messages
self.g = g
# Definition for a private key
class PrivateKey:
def __init__(self, n, phiN, u):
# 'n' is a product of the two primes chosen for the key
self.n = n
# 'phiN' is the phi of the two primes chosen for the key
self.phiN = phiN
# 'u' is the modular inverse of n mod phi(n)
self.u = u
# Generate a random number of 'n' bits from the system entropy function
def randomNumber(bits):
return random.SystemRandom().getrandbits(bits)
# Perform an 'n' round Miller-Rabin primality test (default 40 rounds has fault rate of 2^-128)
def millerRabin(number, rounds=40):
# Get 'm' and 'k' that satisfies 'number - 1 = 2^k * m' with whole numbers
    # Initialise 'k'
k = 0
    # Initialise 'm'
m = number - 1
# When 'm' becomes odd the next iteration wont be whole
while m % 2 == 0:
# Iterate 'k'
k += 1
# Calculate 'm'
m /= 2
# Perform the specified number of rounds
for index in xrange(rounds):
# Perform a single round
if not millerRabinRound(number, m, k):
# The round failed, the number is a composite
return False
# The number passed the specified rounds of accuracy
return True
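# Worked example (added for illustration): for number = 13 the decomposition above gives
# 13 - 1 = 12 = 2^2 * 3, so k = 2 and m = 3 before the witness rounds below are run.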
# Perform a single Miller-Rabin round for the given values
# Returns true for a round pass
def millerRabinRound(number, m, k):
# Generate a random 'a' where 1 < a < number - 1
a = random.randrange(2, number - 1)
# Calculate the value for 'x' where x = a^m mod number
x = pow(a, m, number)
# Check if 'x' is 1 or 'number' - 1 which indicates a probable prime
if x == 1 or x == number - 1:
# The number has passed the round
return True
# Loop the operation 'k' times until a round pass or a composite is found
for index in xrange(k - 1):
# Recalculate 'x'
x = pow(x, 2, number)
# Break loop if 'x' is 'number' - 1
if x == number - 1:
break
# If the loop completes the number is composite
else:
# The number has failed the round
return False
#The number has passed the round
return True
# Test if a number is a probable prime
def isProbablePrime(number):
# Number is not prime if it is even
if number % 2 == 0:
return False
# Perform a Miller-Rabin test with the default number of rounds
if millerRabin(number):
# The number passed the test
return True
else:
# The number failed the test
return False
# Generate a probable prime suitable for use in public key encryption
def generatePrime():
# Loop until a suitable prime is found
while True:
# Generate a prime number of 512 bits
possiblePrime = randomNumber(512)
# Return the number if it is a probable prime
if isProbablePrime(possiblePrime):
return possiblePrime
# Calculate modular inverse (a^-1 mod c)
def modularInverse(a, c):
# Set 'b' as 'c' for use in the algorithm
b = c
# Set initial Bezout Coefficients
coefficientT = 0
lastCoefficientT = 1
coefficientS = 1
lastCoefficientS = 0
# Loop until a GCD is found
gcdFound = False
while not gcdFound:
# Calculate the quotient for this round
quotient = a // b
# Calculate the remainder for this round
a, b = b, a % b
# Check if the GCD has been found
if (b == 0):
gcdFound = True
# Calculate the coefficients for this round
coefficientT, lastCoefficientT = lastCoefficientT - quotient * coefficientT, coefficientT
coefficientS, lastCoefficientS = lastCoefficientS - quotient * coefficientS, coefficientS
# Return the calculated inverse
return lastCoefficientT % c
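# Worked example (added for illustration): modularInverse(3, 11) returns 4 because
# 3 * 4 = 12 = 1 (mod 11); key generation below uses this to compute u = phiN^-1 mod n.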
# Generate a Paillier private key and related public key
def generateKeyPair():
# Get 2 Paillier suitable prime numbers
firstPrime = generatePrime()
secondPrime = generatePrime()
# Ensure the primes are distinct
if firstPrime == secondPrime:
# Reattempt the generation
return generateKeyPair()
# Compute composite number 'n'
n = firstPrime * secondPrime
# Compute the phi of 'n'
phiN = (firstPrime - 1) * (secondPrime - 1)
# Compute 'g' for the public key
g = n + 1
# Compute the modular inverse of 'phiN' 'n', phiN^-1 mod n
u = modularInverse(phiN, n)
# Create the public key
public = PublicKey(n, g)
# Create the private key
private = PrivateKey(n, phiN, u)
# Return the key pair
return public, private
# Encrypt plaintext using a Paillier public key
def encrypt(publicKey, plaintext):
# Calculate n^2
nSquared = publicKey.n ** 2
# Generate a random 'r' where 1 < r < n - 1
r = random.randrange(2, publicKey.n - 1)
# Compute the cyphertext as cyphertext = (g^plaintext mod n^2) * (r^n mod n^2) mod n^2
cyphertext = ( pow(publicKey.g, plaintext, nSquared) *
pow(r, publicKey.n, nSquared) % nSquared )
# Return the encrypted cypher
return cyphertext
# Decrypt Paillier cyphertext using a private key
def decrypt(privateKey, cyphertext):
# Calculate n^2
nSquared = privateKey.n ** 2
# Compute the plaintext as plaintext = L(cyphertext^phiN mod n^2) * u mod n
# Where L(x) = (x - 1) / n
plaintext = ( (pow(cyphertext, privateKey.phiN, nSquared) - 1)
// privateKey.n * privateKey.u % privateKey.n )
# Return the decrypted plaintext
return plaintext
# Apply a homomorphic addition to two integers encrypted by the same key
def homomorphicAdd(publicKey, encryptedInteger1, encryptedInteger2):
# Compute the addition as result = encryptedInteger1 * encryptedInteger2 mod n^2
return encryptedInteger1 * encryptedInteger2 % (publicKey.n ** 2)
# Add the encrypted votes of a ballot
def addVotes(votes, publicKey):
    # Initialise results array
results = []
# Calculate the number of candidates
candidateCount = len(votes[0])
# Loop through each vote
for index in xrange(len(votes)):
# Check if this is the first vote
if index == 0:
# Simply add the values
results = votes[index]
else:
# Loop through each value
for valueIndex in xrange(candidateCount):
                # homomorphically add this value to the result
results[valueIndex] = homomorphicAdd(publicKey, results[valueIndex], votes[index][valueIndex])
# Return the encrypted results
return results
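# --- Hedged demo (added for illustration, not part of the original module) ---
# Shows the Paillier property used by addVotes: multiplying ciphertexts modulo n^2
# adds the underlying plaintexts. Key generation uses 512-bit primes, so it takes a moment.
if __name__ == "__main__":
    demoPublicKey, demoPrivateKey = generateKeyPair()
    encryptedTwo = encrypt(demoPublicKey, 2)
    encryptedThree = encrypt(demoPublicKey, 3)
    encryptedSum = homomorphicAdd(demoPublicKey, encryptedTwo, encryptedThree)
    # Expected to print 5
    print(decrypt(demoPrivateKey, encryptedSum))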
| mit | -8,274,426,283,300,570,000 | 25.48855 | 110 | 0.63415 | false |
intel-analytics/analytics-zoo | pyzoo/zoo/pipeline/api/keras/optimizers.py | 1 | 4419 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.util.common import *
from bigdl.optim.optimizer import OptimMethod, Default
from zoo.pipeline.api.keras.base import ZooKerasCreator
from zoo.common.utils import callZooFunc
if sys.version >= '3':
long = int
unicode = str
class Adam(OptimMethod, ZooKerasCreator):
"""
An implementation of Adam with learning rate schedule.
>>> adam = Adam()
creating: createZooKerasAdam
creating: createDefault
"""
def __init__(self,
lr=1e-3,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-8,
decay=0.0,
schedule=None,
weight_decay=0.0,
bigdl_type="float"):
"""
:param lr learning rate
:param beta_1 first moment coefficient
:param beta_2 second moment coefficient
:param epsilon for numerical stability
:param decay learning rate decay
:param schedule learning rate schedule, e.g. Warmup or Poly from BigDL
"""
# explicitly reimplement the constructor since:
# 1. This class need to be a subclass of OptimMethod
# 2. The constructor of OptimMethod invokes JavaValue.jvm_class_constructor() directly
# and does not take the polymorphism.
self.value = callZooFunc(
bigdl_type, ZooKerasCreator.jvm_class_constructor(self),
lr,
beta_1,
beta_2,
epsilon,
decay,
weight_decay,
schedule if (schedule) else Default()
)
self.bigdl_type = bigdl_type
class AdamWeightDecay(OptimMethod, ZooKerasCreator):
"""
Implements BERT version of Adam algorithm.
>>> adam = AdamWeightDecay()
creating: createZooKerasAdamWeightDecay
"""
def __init__(self,
lr=1e-3,
warmup_portion=-1.0,
total=-1,
schedule="linear",
beta1=0.9,
beta2=0.999,
epsilon=1e-6,
weight_decay=0.01,
bigdl_type="float"):
"""
:param lr learning rate
:param warmupPortion portion of total for the warmup, -1 means no warmup. Default: -1
:param total total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
:param schedule schedule to use for the warmup. Default: 'linear'
:param beta1 first moment coefficient
:param beta2 second moment coefficient
:param epsilon for numerical stability
:param weightDecay weight decay
"""
# explicitly reimplement the constructor since:
# 1. This class need to be a subclass of OptimMethod
# 2. The constructor of OptimMethod invokes JavaValue.jvm_class_constructor() directly
# and does not take the polymorphism.
self.value = callZooFunc(
bigdl_type, ZooKerasCreator.jvm_class_constructor(self),
lr,
warmup_portion,
total,
schedule,
beta1,
beta2,
epsilon,
weight_decay)
self.bigdl_type = bigdl_type
class PolyEpochDecay(ZooKerasCreator):
"""
A learning rate decay policy, where the effective learning rate
follows a polynomial decay, to be zero by the max_epochs.
Calculation: init_lr * (1 - epoch/max_iteration) ^ (power)
:param power: The coefficient of decay.
:param max_epochs: The maximum number of epochs when lr becomes zero.
>>> poly = PolyEpochDecay(0.5, 5)
creating: createZooKerasPolyEpochDecay
"""
def __init__(self, power, max_epochs, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, power, max_epochs)
| apache-2.0 | -793,365,526,256,598,700 | 32.732824 | 94 | 0.61394 | false |
gsi-upm/Sefarad | docs/conf.py | 1 | 8585 | # -*- coding: utf-8 -*-
#
# Senpy documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 24 08:57:32 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinxcontrib.httpdomain',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sefarad'
copyright = u'2018, A. Pascual Saavedra'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
html_theme = 'alabaster'
html_theme_options = {
'logo': 'Sefarad_Logo.png',
'github_user': 'gsi-upm',
'github_repo': 'sefarad',
'github_banner': True,
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sefaraddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Sefarad.tex', u'Sefarad Documentation',
u'A. Pascual Saavedra', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sefarad', u'Sefarad Documentation',
[u'A. Pascual Saavedra'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Sefarad', u'Sefarad Documentation',
u'A. Pascual Saavedra', 'Sefarad', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 | 7,806,735,911,868,815,000 | 29.770609 | 79 | 0.699709 | false |
01org/yoko-tool | tests/test_unified.py | 1 | 9054 | #!/usr/bin/env python
#
# Copyright (C) 2013-2020 Intel Corporation
# SPDX-License-Identifier: GPL-2.0-only
#
# -*- coding: utf-8 -*-
# vim: ts=4 sw=4 tw=100 et ai si
#
# Author: Artem Bityutskiy <[email protected]>
"""
This is a 'py.test' test for the PowerMeter module.
"""
# pylint: disable=redefined-outer-name
# pylint: disable=too-few-public-methods
from __future__ import absolute_import, division, print_function
import os
import random
import logging
import subprocess
import pytest
from yokolibs import PowerMeter, _config, _logging
class CmdLineArgs(object):
"""A dummy command-line arguments class."""
devnode = None
_LOG = logging.getLogger()
try:
import sys
_LOG_LEVEL = sys.argv[sys.argv.index("--loglevel") + 1].upper()
_logging.setup_logger(_LOG, getattr(logging, _LOG_LEVEL))
except ValueError:
pass
class YokotoolPowerMeter():
"""
    This class emulates the 'PowerMeter' class but uses yokotool underneath. This way we can test
yokotool and the PowerMeter class API the same way.
"""
def command(self, cmd, arg=None):
"""The 'command' method which ends up running the tool."""
if cmd == "wait-data-update":
return
if cmd == "configure-data-items":
self._data_items = arg
return
if cmd == "read-data":
ycmd = ["read", "--count=1", ",".join(self._data_items)]
elif "integration" in cmd:
if cmd.startswith("get-") or cmd.startswith("set-"):
ycmd = cmd.split("-")[-1]
else:
ycmd = cmd.split("-")[0]
ycmd = ["integration", ycmd]
else:
ycmd = cmd.split("-", 1)
ycmd = self._ycmd_prefix + ycmd
if arg:
ycmd.append(str(arg))
try:
_LOG.info("%s", " ".join(ycmd))
result = subprocess.check_output(ycmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
raise PowerMeter.Error(str(err))
if not result:
return None
result = str(result.decode("utf-8").strip())
if cmd == "read-data":
result = result.splitlines()[-1].split(",")
return result
def close(self):
"""Nothing to do on close."""
pass
def __init__(self, **kwargs):
"""The constructuor."""
self._data_items = None
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
self._yokotool_path = os.path.join(basedir, "yokotool")
assert os.path.exists(self._yokotool_path)
self._ycmd_prefix = [self._yokotool_path]
for name, val in kwargs.items():
if name == "devnode":
self._ycmd_prefix.append(kwargs["devnode"])
else:
self._ycmd_prefix.append("--%s=%s" % (name, val))
# Fake the "PowerMeter" commands dictionary.
pmeter = PowerMeter.PowerMeter(**kwargs)
self.commands = pmeter.commands
self.max_data_items = pmeter.max_data_items
pmeter.close()
def prepare_pmeter(pmeter):
"""Prepare the power meter for testing."""
# Reset the integration.
try:
pmeter.command("stop-integration")
except PowerMeter.Error:
pass
try:
pmeter.command("reset-integration")
except PowerMeter.Error:
pass
pmeter.command("set-integration-mode", "normal")
pmeter.command("set-integration-timer", "0")
assert pmeter.command("get-integration-state") == "reset"
assert pmeter.command("set-smoothing-status", "off") is None
@pytest.fixture(params=[PowerMeter.PowerMeter, YokotoolPowerMeter])
def pmeter(devspec, request):
"""Returns a 'PowerMeter' class instance for the given device node."""
args = CmdLineArgs()
secname = None
if "/" in devspec or os.path.exists(devspec):
args.devnode = devspec
else:
secname = devspec
config = _config.process_config(secname=secname, args=args)
pmeter = request.param(**config)
prepare_pmeter(pmeter)
yield pmeter
pmeter.close()
def test_get(pmeter):
"""Verify all the "get something" commands."""
for cmd in pmeter.commands:
if cmd.startswith("get-"):
result = pmeter.command(cmd)
assert result
if "choices" in cmd:
assert result in cmd["choices"]
def set_every_choice(pmeter, cmd, verify):
"""Go through each possible value of a command and set it."""
get_cmd = "get-%s" % cmd
set_cmd = "set-%s" % cmd
if set_cmd not in pmeter.commands or not pmeter.commands[set_cmd]["choices-set"]:
return
orig = pmeter.command(get_cmd)
for val in pmeter.commands[set_cmd]["choices"]:
pmeter.command(set_cmd, val)
if verify:
assert pmeter.command(get_cmd) == val
pmeter.command(set_cmd, orig)
if verify:
assert pmeter.command(get_cmd) == orig
def test_set(pmeter):
"""Verify some of the the "get something" commands."""
for cmd in pmeter.commands:
verify = True
if not cmd.startswith("set-"):
continue
# Skip the range-related commands, they are potentially unsafe to randomly change.
if "-range" in cmd:
continue
# On WT210 remote mode gets enabled when any command is sent, so disable validation.
if cmd == "set-remote-mode":
verify = False
set_every_choice(pmeter, cmd[4:], verify)
# Set ranges to the max. possible values.
value = pmeter.commands["get-current-range"]["choices"][-1]
pmeter.command("set-current-range", value)
assert pmeter.command("get-current-range") == value
value = pmeter.commands["get-voltage-range"]["choices"][-1]
pmeter.command("set-voltage-range", value)
assert pmeter.command("get-voltage-range") == value
# Enable auto-range to make sure power meter selects the reasonable one.
pmeter.command("set-current-auto-range", "on")
pmeter.command("set-voltage-auto-range", "on")
def test_read(pmeter):
"""Test measurement data reading functionality."""
data_items = pmeter.commands["read-data"]["choices"]
max_items = pmeter.max_data_items
# Run several test with random data items.
for _ in range(16):
items = []
for idx in random.sample(range(0, len(data_items)), random.randint(1, max_items)):
items.append(data_items[idx])
pmeter.command("configure-data-items", items)
data = pmeter.command("read-data")
assert len(data) == len(items)
pmeter.command("wait-data-update")
data = pmeter.command("read-data")
assert len(data) == len(items)
def test_integration(pmeter):
"""Test the integration functionality."""
modes = pmeter.commands["get-integration-mode"]["choices"]
# Go through every mode.
for mode in modes:
if mode == "continuous":
timer = "100"
pmeter.command("set-integration-timer", timer)
assert pmeter.command("get-integration-timer") == timer
pmeter.command("set-integration-mode", mode)
assert pmeter.command("get-integration-mode") == mode
# Read 4 data items with integration.
pmeter.command("start-integration")
assert "start" in pmeter.command("get-integration-state")
pmeter.command("configure-data-items", ["P", "I", "V"])
for _ in range(4):
pmeter.command("wait-data-update")
data = pmeter.command("read-data")
assert len(data) == 3
# And without integration.
pmeter.command("stop-integration")
assert "stop" in pmeter.command("get-integration-state")
pmeter.command("configure-data-items", ["P", "I", "V"])
for _ in range(4):
pmeter.command("wait-data-update")
data = pmeter.command("read-data")
assert len(data) == 3
# And again with integration.
pmeter.command("start-integration")
assert "start" in pmeter.command("get-integration-state")
for _ in range(4):
pmeter.command("wait-data-update")
data = pmeter.command("read-data")
assert len(data) == 3
pmeter.command("stop-integration")
assert "stop" in pmeter.command("get-integration-state")
pmeter.command("reset-integration")
assert pmeter.command("get-integration-state") == "reset"
def test_bad_command(pmeter):
"""Verify that bad power meter commands raise an exception."""
with pytest.raises(PowerMeter.Error):
pmeter.command(" get-id")
pmeter.command("get-id ")
pmeter.command("get-id_")
pmeter.command("set-current-range", None)
pmeter.command("set-current-range", "")
pmeter.command("set-current-range", -1)
pmeter.command("set-current-range", 0)
pmeter.command("set-current-range", float(0))
pmeter.command("set-current-range", "0")
| gpl-2.0 | -5,944,822,185,982,959,000 | 31.220641 | 97 | 0.607908 | false |
arenadata/ambari | ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_server_upgrade.py | 1 | 8007 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
from resource_management.core.resources.system import Execute
from resource_management.core import shell
from resource_management.libraries.functions import format
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.version import compare_versions
def pre_upgrade_deregister():
"""
Runs the "hive --service hiveserver2 --deregister <version>" command to
de-provision the server in preparation for an upgrade. This will contact
ZooKeeper to remove the server so that clients that attempt to connect
will be directed to other servers automatically. Once all
clients have drained, the server will shutdown automatically; this process
could take a very long time.
This function will obtain the Kerberos ticket if security is enabled.
:return:
"""
import params
Logger.info('HiveServer2 executing "deregister" command in preparation for upgrade...')
if params.security_enabled:
kinit_command=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
Execute(kinit_command,user=params.smokeuser)
# calculate the current hive server version
current_hiveserver_version = _get_current_hiveserver_version()
if current_hiveserver_version is None:
raise Fail('Unable to determine the current HiveServer2 version to deregister.')
# fallback when upgrading because /usr/iop/current/hive-server2/conf/conf.server may not exist
hive_server_conf_dir = params.hive_server_conf_dir
if not os.path.exists(hive_server_conf_dir):
hive_server_conf_dir = "/etc/hive/conf.server"
# deregister
hive_execute_path = params.execute_path
# If upgrading, the upgrade-target hive binary should be used to call the --deregister command.
# If downgrading, the downgrade-source hive binary should be used to call the --deregister command.
if "upgrade" == params.upgrade_direction:
# hive_bin
upgrade_target_version = format_stack_version(params.version)
if upgrade_target_version and compare_versions(upgrade_target_version, "4.1.0.0") >= 0:
upgrade_target_hive_bin = format('/usr/iop/{version}/hive/bin')
if (os.pathsep + params.hive_bin) in hive_execute_path:
hive_execute_path = hive_execute_path.replace(os.pathsep + params.hive_bin, os.pathsep + upgrade_target_hive_bin)
# hadoop_bin_dir
upgrade_target_hadoop_bin = stack_select.get_hadoop_dir("bin", upgrade_stack_only=True)
upgrade_source_hadoop_bin = params.hadoop_bin_dir
if upgrade_target_hadoop_bin and len(upgrade_target_hadoop_bin) > 0 and (os.pathsep + upgrade_source_hadoop_bin) in hive_execute_path:
hive_execute_path = hive_execute_path.replace(os.pathsep + upgrade_source_hadoop_bin, os.pathsep + upgrade_target_hadoop_bin)
command = format('hive --config {hive_server_conf_dir} --service hiveserver2 --deregister ' + current_hiveserver_version)
Execute(command, user=params.hive_user, path=hive_execute_path, tries=1 )
def _get_current_hiveserver_version():
"""
Runs "hive --version" and parses the result in order
to obtain the current version of hive.
:return: the hiveserver2 version, returned by "hive --version"
"""
import params
try:
command = 'hive --version'
return_code, iop_output = shell.call(command, user=params.hive_user, path=params.execute_path)
except Exception, e:
Logger.error(str(e))
raise Fail('Unable to execute hive --version command to retrieve the hiveserver2 version.')
if return_code != 0:
raise Fail('Unable to determine the current HiveServer2 version because of a non-zero return code of {0}'.format(str(return_code)))
match = re.search('^(Hive) ([0-9]+.[0-9]+.\S+)', iop_output, re.MULTILINE)
if match:
current_hive_server_version = match.group(2)
return current_hive_server_version
else:
raise Fail('The extracted hiveserver2 version "{0}" does not matching any known pattern'.format(iop_output))
def post_upgrade_deregister():
"""
Runs the "hive --service hiveserver2 --deregister <version>" command to
  de-provision the server once the upgrade has completed. This will contact
ZooKeeper to remove the server so that clients that attempt to connect
will be directed to other servers automatically. Once all
clients have drained, the server will shutdown automatically; this process
could take a very long time.
This function will obtain the Kerberos ticket if security is enabled.
:return:
"""
import params
Logger.info('HiveServer2 executing "deregister" command to complete upgrade...')
if params.security_enabled:
kinit_command=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
Execute(kinit_command,user=params.smokeuser)
# calculate the current hive server version
current_hiveserver_version = _get_current_hiveserver_version()
if current_hiveserver_version is None:
raise Fail('Unable to determine the current HiveServer2 version to deregister.')
# fallback when upgrading because /usr/hdp/current/hive-server2/conf/conf.server may not exist
hive_server_conf_dir = params.hive_server_conf_dir
if not os.path.exists(hive_server_conf_dir):
hive_server_conf_dir = "/etc/hive/conf.server"
# deregister
hive_execute_path = params.execute_path
# If upgrading, the upgrade-target hive binary should be used to call the --deregister command.
# If downgrading, the downgrade-source hive binary should be used to call the --deregister command.
# By now <stack-selector-tool> has been called to set 'current' to target-stack
if params.downgrade_from_version is not None:
hive_execute_path = _get_hive_execute_path(params.downgrade_from_version)
command = format('hive --config {hive_server_conf_dir} --service hiveserver2 --deregister ' + current_hiveserver_version)
Execute(command, user=params.hive_user, path=hive_execute_path, tries=1 )
def _get_hive_execute_path(stack_version):
"""
Returns the exact execute path to use for the given stack-version.
This method does not return the "current" path
:param stack_version: Exact stack-version to use in the new path
:return: Hive execute path for the exact hdp stack-version
"""
import params
hive_execute_path = params.execute_path
formatted_stack_version = format_stack_version(stack_version)
if formatted_stack_version and compare_versions(formatted_stack_version, "4.1") >= 0:
# hive_bin
new_hive_bin = format('/usr/iop/{stack_version}/hive/bin')
if (os.pathsep + params.hive_bin) in hive_execute_path:
hive_execute_path = hive_execute_path.replace(os.pathsep + params.hive_bin, os.pathsep + new_hive_bin)
# hadoop_bin_dir
new_hadoop_bin = stack_select.get_hadoop_dir_for_stack_version("bin", stack_version)
old_hadoop_bin = params.hadoop_bin_dir
if new_hadoop_bin and len(new_hadoop_bin) > 0 and (os.pathsep + old_hadoop_bin) in hive_execute_path:
hive_execute_path = hive_execute_path.replace(os.pathsep + old_hadoop_bin, os.pathsep + new_hadoop_bin)
return hive_execute_path
| apache-2.0 | -3,441,732,457,333,918,700 | 46.1 | 138 | 0.749594 | false |
xgfone/snippet | snippet/example/python/project/project/common/utils.py | 1 | 2500 | # encoding: utf-8
from __future__ import absolute_import, print_function, unicode_literals, division
import pbr.version
from six import text_type as unicode_type
from six import string_types as basestring_type
from six import binary_type as bytes_type
_BYTES_TYPES = (bytes_type, type(None))
_UNICODE_TYPES = (unicode_type, type(None))
_BASESTRING_TYPES = (basestring_type, type(None))
def get_version(project, version=None):
if version:
return version
return pbr.version.VersionInfo(project).version_string()
def to_bytes(obj, encoding="utf-8"):
"""Converts a string argument to a bytes string.
If the argument is already a bytes string or None, it is returned
    unchanged. Otherwise it must be a unicode string and is encoded as
the argument of encoding."""
if isinstance(obj, _BYTES_TYPES):
return obj
elif isinstance(obj, unicode_type):
return obj.encode(encoding)
raise TypeError("Expected bytes, unicode, or None; got %r" % type(obj))
def to_unicode(obj, decoding="utf-8"):
"""Converts a string argument to a unicode string.
If the argument is already a unicode string or None, it is returned
unchanged. Otherwise it must be a byte string and is decoded as
the argument of encoding.
"""
if isinstance(obj, _UNICODE_TYPES):
return obj
elif isinstance(obj, bytes_type):
return obj.decode(decoding)
raise TypeError("Expected bytes, unicode, or None; got %r" % type(obj))
def to_basestring(value, encoding="utf-8"):
"""Converts a string argument to a subclass of basestring.
In python2, byte and unicode strings are mostly interchangeable,
so functions that deal with a user-supplied argument in combination
with ascii string constants can use either and should return the type
the user supplied. In python3, the two types are not interchangeable,
so this method is needed to convert byte strings to unicode.
"""
if isinstance(value, _BASESTRING_TYPES):
return value
    if isinstance(value, bytes):
        return value.decode(encoding)
    raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
# When dealing with the standard library across python 2 and 3 it is
# sometimes useful to have a direct conversion to the native string type
if str is unicode_type:
to_str = native_str = to_unicode
else:
to_str = native_str = to_bytes
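# Illustrative examples (added commentary, not part of the original module):
#   to_bytes(u"abc")    -> b"abc"   (unicode text is encoded; bytes and None pass through)
#   to_unicode(b"abc")  -> u"abc"   (bytes are decoded; unicode and None pass through)
#   native_str points at to_unicode on Python 3 and at to_bytes on Python 2.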
| mit | 5,995,021,948,278,806,000 | 34.231884 | 82 | 0.6908 | false |
maralorn/pythonlights | sound.py | 1 | 1790 | #!/usr/bin/python
# open a microphone in pyAudio and get its FFT spectrum
import pyaudio
import numpy as np
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
INPUT_BLOCK_TIME = 0.08
GLIDING_DIVIDER = 4
INPUT_FRAMES_PER_BLOCK = int(RATE*INPUT_BLOCK_TIME/GLIDING_DIVIDER)
soundtype = np.dtype([('l',np.int16),('r',np.int16)])
class Listener(object):
def __init__(self):
self.pa = pyaudio.PyAudio()
self.stream = self.open_mic_stream()
raw = self.listen()
for i in range(1,GLIDING_DIVIDER):
raw += self.listen()
stereodata = np.fromstring(raw,soundtype)
self.buf = (stereodata['l'] + stereodata['r'])/2
def stop(self):
self.stream.close()
def open_mic_stream( self ):
stream = self.pa.open( format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
input_device_index = None,
frames_per_buffer = INPUT_FRAMES_PER_BLOCK)
return stream
def listen(self):
try:
block = self.stream.read(INPUT_FRAMES_PER_BLOCK)
except IOError:
return
return block
# Returns the FFT of a sound sample recorded over INPUT_BLOCK_TIME.
# This is a numpy array of RATE*INPUT_BLOCK_TIME/2 values.
# The i-th element represents the frequency i/INPUT_BLOCK_TIME
def get_spectrum(self):
raw = self.listen()
stereodata = np.fromstring(raw,soundtype)
monodata = (stereodata['l'] + stereodata['r'])/2
self.buf[:-len(monodata)] = self.buf[len(monodata):]
self.buf[-len(monodata):] = monodata
return abs(np.fft.rfft(self.buf))
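# Illustrative usage sketch (added commentary; assumes PyAudio and a working microphone):
#
#   listener = Listener()
#   spectrum = listener.get_spectrum()   # bin i corresponds to frequency i / INPUT_BLOCK_TIME Hz
#   listener.stop()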
| mit | -6,446,139,762,912,498,000 | 29.862069 | 76 | 0.569832 | false |
infoxchange/barman | barman/lockfile.py | 1 | 9546 | # Copyright (C) 2011-2017 2ndQuadrant Limited
#
# This file is part of Barman.
#
# Barman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Barman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Barman. If not, see <http://www.gnu.org/licenses/>.
"""
This module is the lock manager for Barman
"""
import errno
import fcntl
import os
import re
from barman.exceptions import (LockFileBusy, LockFileParsingError,
LockFilePermissionDenied)
class LockFile(object):
"""
Ensures that there is only one process which is running against a
specified LockFile.
It supports the Context Manager interface, allowing the use in with
statements.
with LockFile('file.lock') as locked:
if not locked:
print "failed"
else:
<do something>
You can also use exceptions on failures
try:
with LockFile('file.lock', True):
<do something>
    except LockFileBusy as e:
        print "failed to lock %s" % e
"""
LOCK_PATTERN = None
"""
If defined in a subclass, it must be a compiled regular expression
which matches the lock filename.
It must provide named groups for the constructor parameters which produce
the same lock name. I.e.:
>>> ServerWalReceiveLock('/tmp', 'server-name').filename
'/tmp/.server-name-receive-wal.lock'
>>> ServerWalReceiveLock.LOCK_PATTERN = re.compile(
r'\.(?P<server_name>.+)-receive-wal\.lock')
>>> m = ServerWalReceiveLock.LOCK_PATTERN.match(
'.server-name-receive-wal.lock')
>>> ServerWalReceiveLock('/tmp', **(m.groupdict())).filename
'/tmp/.server-name-receive-wal.lock'
"""
@classmethod
def build_if_matches(cls, path):
"""
Factory method that creates a lock instance if the path matches
the lock filename created by the actual class
:param path: the full path of a LockFile
:return:
"""
# If LOCK_PATTERN is not defined always return None
if not cls.LOCK_PATTERN:
return None
# Matches the provided path against LOCK_PATTERN
lock_directory = os.path.abspath(os.path.dirname(path))
lock_name = os.path.basename(path)
match = cls.LOCK_PATTERN.match(lock_name)
if match:
# Build the lock object for the provided path
return cls(lock_directory, **(match.groupdict()))
return None
def __init__(self, filename, raise_if_fail=True, wait=False):
self.filename = os.path.abspath(filename)
self.fd = None
self.raise_if_fail = raise_if_fail
self.wait = wait
def acquire(self, raise_if_fail=None, wait=None):
"""
Creates and holds on to the lock file.
When raise_if_fail, a LockFileBusy is raised if
the lock is held by someone else and a LockFilePermissionDenied is
raised when the user executing barman have insufficient rights for
the creation of a LockFile.
Returns True if lock has been successfully acquired, False otherwise.
:param bool raise_if_fail: If True raise an exception on failure
:param bool wait: If True issue a blocking request
:returns bool: whether the lock has been acquired
"""
if self.fd:
return True
fd = None
# method arguments take precedence on class parameters
raise_if_fail = raise_if_fail \
if raise_if_fail is not None else self.raise_if_fail
wait = wait if wait is not None else self.wait
try:
# 384 is 0600 in octal, 'rw-------'
fd = os.open(self.filename, os.O_CREAT | os.O_RDWR, 384)
flags = fcntl.LOCK_EX
if not wait:
flags |= fcntl.LOCK_NB
fcntl.flock(fd, flags)
# Once locked, replace the content of the file
os.lseek(fd, 0, os.SEEK_SET)
os.write(fd, ("%s\n" % os.getpid()).encode('ascii'))
# Truncate the file at the current position
os.ftruncate(fd, os.lseek(fd, 0, os.SEEK_CUR))
self.fd = fd
return True
except (OSError, IOError) as e:
if fd:
os.close(fd) # let's not leak file descriptors
if raise_if_fail:
if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
raise LockFileBusy(self.filename)
elif e.errno == errno.EACCES:
raise LockFilePermissionDenied(self.filename)
else:
raise
else:
return False
def release(self):
"""
Releases the lock.
If the lock is not held by the current process it does nothing.
"""
if not self.fd:
return
try:
fcntl.flock(self.fd, fcntl.LOCK_UN)
os.close(self.fd)
except (OSError, IOError):
pass
self.fd = None
def __del__(self):
"""
Avoid stale lock files.
"""
self.release()
# Contextmanager interface
def __enter__(self):
return self.acquire()
def __exit__(self, exception_type, value, traceback):
self.release()
def get_owner_pid(self):
"""
Test whether a lock is already held by a process.
Returns the PID of the owner process or None if the lock is available.
:rtype: int|None
:raises LockFileParsingError: when the lock content is garbled
:raises LockFilePermissionDenied: when the lockfile is not accessible
"""
try:
self.acquire(raise_if_fail=True, wait=False)
except LockFileBusy:
try:
# Read the lock content and parse the PID
# NOTE: We cannot read it in the self.acquire method to avoid
# reading the previous locker PID
with open(self.filename, 'r') as file_object:
return int(file_object.readline().strip())
except ValueError as e:
# This should not happen
raise LockFileParsingError(e)
# release the lock and return None
self.release()
return None
class GlobalCronLock(LockFile):
"""
This lock protects cron from multiple executions.
Creates a global '.cron.lock' lock file under the given lock_directory.
"""
def __init__(self, lock_directory):
super(GlobalCronLock, self).__init__(
os.path.join(lock_directory, '.cron.lock'),
raise_if_fail=True)
class ServerBackupLock(LockFile):
"""
This lock protects a server from multiple executions of backup command
Creates a '.<SERVER>-backup.lock' lock file under the given lock_directory
for the named SERVER.
"""
def __init__(self, lock_directory, server_name):
super(ServerBackupLock, self).__init__(
os.path.join(lock_directory, '.%s-backup.lock' % server_name),
raise_if_fail=True)
class ServerCronLock(LockFile):
"""
This lock protects a server from multiple executions of cron command
Creates a '.<SERVER>-cron.lock' lock file under the given lock_directory
for the named SERVER.
"""
def __init__(self, lock_directory, server_name):
super(ServerCronLock, self).__init__(
os.path.join(lock_directory, '.%s-cron.lock' % server_name),
raise_if_fail=True, wait=False)
class ServerXLOGDBLock(LockFile):
"""
This lock protects a server's xlogdb access
Creates a '.<SERVER>-xlogdb.lock' lock file under the given lock_directory
for the named SERVER.
"""
def __init__(self, lock_directory, server_name):
super(ServerXLOGDBLock, self).__init__(
os.path.join(lock_directory, '.%s-xlogdb.lock' % server_name),
raise_if_fail=True, wait=True)
class ServerWalArchiveLock(LockFile):
"""
This lock protects a server from multiple executions of wal-archive command
Creates a '.<SERVER>-archive-wal.lock' lock file under
the given lock_directory for the named SERVER.
"""
def __init__(self, lock_directory, server_name):
super(ServerWalArchiveLock, self).__init__(
os.path.join(lock_directory, '.%s-archive-wal.lock' % server_name),
raise_if_fail=True, wait=False)
class ServerWalReceiveLock(LockFile):
"""
This lock protects a server from multiple executions of receive-wal command
Creates a '.<SERVER>-receive-wal.lock' lock file under
the given lock_directory for the named SERVER.
"""
# TODO: Implement on the other LockFile subclasses
LOCK_PATTERN = re.compile(r'\.(?P<server_name>.+)-receive-wal\.lock')
def __init__(self, lock_directory, server_name):
super(ServerWalReceiveLock, self).__init__(
os.path.join(lock_directory, '.%s-receive-wal.lock' % server_name),
raise_if_fail=True, wait=False)
| gpl-3.0 | -7,858,874,542,976,082,000 | 32.261324 | 79 | 0.610937 | false |
leonsas/django-push-notifications | push_notifications/gcm.py | 1 | 4098 | """
Google Cloud Messaging
Previously known as C2DM
Documentation is available on the Android Developer website:
https://developer.android.com/google/gcm/index.html
"""
import json
try:
from urllib.request import Request, urlopen
from urllib.parse import urlencode
except ImportError:
# Python 2 support
from urllib2 import Request, urlopen
from urllib import urlencode
from django.core.exceptions import ImproperlyConfigured
from . import NotificationError
from .settings import PUSH_NOTIFICATIONS_SETTINGS as SETTINGS
class GCMError(NotificationError):
pass
def _chunks(l, n):
"""
	Yield successive chunks of at most \a n items from list \a l
"""
for i in range(0, len(l), n):
yield l[i:i + n]
def _gcm_send(data, content_type):
key = SETTINGS.get("GCM_API_KEY")
if not key:
raise ImproperlyConfigured('You need to set PUSH_NOTIFICATIONS_SETTINGS["GCM_API_KEY"] to send messages through GCM.')
headers = {
"Content-Type": content_type,
"Authorization": "key=%s" % (key),
"Content-Length": str(len(data)),
}
request = Request(SETTINGS["GCM_POST_URL"], data, headers)
return urlopen(request).read()
def _gcm_send_plain(registration_id, data, collapse_key=None, delay_while_idle=False, time_to_live=0):
"""
Sends a GCM notification to a single registration_id.
This will send the notification as form data.
If sending multiple notifications, it is more efficient to use
gcm_send_bulk_message() with a list of registration_ids
"""
values = {"registration_id": registration_id}
if collapse_key:
values["collapse_key"] = collapse_key
if delay_while_idle:
values["delay_while_idle"] = int(delay_while_idle)
if time_to_live:
values["time_to_live"] = time_to_live
for k, v in data.items():
values["data.%s" % (k)] = v.encode("utf-8")
data = urlencode(sorted(values.items())).encode("utf-8") # sorted items for tests
result = _gcm_send(data, "application/x-www-form-urlencoded;charset=UTF-8")
if result.startswith("Error="):
raise GCMError(result)
return result
def _gcm_send_json(registration_ids, data, collapse_key=None, delay_while_idle=False, time_to_live=0):
"""
Sends a GCM notification to one or more registration_ids. The registration_ids
needs to be a list.
This will send the notification as json data.
"""
values = {"registration_ids": registration_ids}
if data is not None:
values["data"] = data
if collapse_key:
values["collapse_key"] = collapse_key
if delay_while_idle:
values["delay_while_idle"] = delay_while_idle
if time_to_live:
values["time_to_live"] = time_to_live
data = json.dumps(values, separators=(",", ":"), sort_keys=True).encode("utf-8") # keys sorted for tests
result = json.loads(_gcm_send(data, "application/json"))
if result["failure"]:
raise GCMError(result)
return result
def gcm_send_message(registration_id, data, collapse_key=None, delay_while_idle=False, time_to_live=0):
"""
Sends a GCM notification to a single registration_id.
This will send the notification as form data if possible, otherwise it will
fall back to json data.
If sending multiple notifications, it is more efficient to use
gcm_send_bulk_message() with a list of registration_ids
"""
args = data, collapse_key, delay_while_idle, time_to_live
try:
_gcm_send_plain(registration_id, *args)
except AttributeError:
_gcm_send_json([registration_id], *args)
def gcm_send_bulk_message(registration_ids, data, collapse_key=None, delay_while_idle=False, time_to_live=0):
"""
Sends a GCM notification to one or more registration_ids. The registration_ids
needs to be a list.
This will send the notification as json data.
"""
args = data, collapse_key, delay_while_idle, time_to_live
# GCM only allows up to 1000 reg ids per bulk message
# https://developer.android.com/google/gcm/gcm.html#request
max_recipients = SETTINGS.get("GCM_MAX_RECIPIENTS")
if len(registration_ids) > max_recipients:
ret = []
for chunk in _chunks(registration_ids, max_recipients):
ret.append(_gcm_send_json(chunk, *args))
return ret
return _gcm_send_json(registration_ids, *args)
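# Illustrative usage sketch (added commentary, not part of the original module).
# Assumes PUSH_NOTIFICATIONS_SETTINGS["GCM_API_KEY"] has been configured:
#
#   gcm_send_message("device-registration-id", {"message": "hello"})
#   gcm_send_bulk_message(["reg-id-1", "reg-id-2"], {"message": "hello"})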
| mit | 1,631,497,409,407,225,000 | 27.068493 | 120 | 0.724012 | false |
mtmail/Nominatim | test/python/test_tokenizer_factory.py | 1 | 2134 | """
Tests for creating new tokenizers.
"""
import pytest
from nominatim.db import properties
from nominatim.tokenizer import factory
from nominatim.errors import UsageError
from dummy_tokenizer import DummyTokenizer
@pytest.fixture
def test_config(def_config, tmp_path, property_table, tokenizer_mock):
def_config.project_dir = tmp_path
return def_config
def test_setup_dummy_tokenizer(temp_db_conn, test_config):
tokenizer = factory.create_tokenizer(test_config)
assert isinstance(tokenizer, DummyTokenizer)
assert tokenizer.init_state == "new"
assert (test_config.project_dir / 'tokenizer').is_dir()
assert properties.get_property(temp_db_conn, 'tokenizer') == 'dummy'
def test_setup_tokenizer_dir_exists(test_config):
(test_config.project_dir / 'tokenizer').mkdir()
tokenizer = factory.create_tokenizer(test_config)
assert isinstance(tokenizer, DummyTokenizer)
assert tokenizer.init_state == "new"
def test_setup_tokenizer_dir_failure(test_config):
(test_config.project_dir / 'tokenizer').write_text("foo")
with pytest.raises(UsageError):
factory.create_tokenizer(test_config)
def test_setup_bad_tokenizer_name(def_config, tmp_path, monkeypatch):
def_config.project_dir = tmp_path
monkeypatch.setenv('NOMINATIM_TOKENIZER', 'dummy')
with pytest.raises(UsageError):
factory.create_tokenizer(def_config)
def test_load_tokenizer(test_config):
factory.create_tokenizer(test_config)
tokenizer = factory.get_tokenizer_for_db(test_config)
assert isinstance(tokenizer, DummyTokenizer)
assert tokenizer.init_state == "loaded"
def test_load_no_tokenizer_dir(test_config):
factory.create_tokenizer(test_config)
test_config.project_dir = test_config.project_dir / 'foo'
with pytest.raises(UsageError):
factory.get_tokenizer_for_db(test_config)
def test_load_missing_propoerty(temp_db_cursor, test_config):
factory.create_tokenizer(test_config)
temp_db_cursor.execute("TRUNCATE TABLE nominatim_properties")
with pytest.raises(UsageError):
factory.get_tokenizer_for_db(test_config)
| gpl-2.0 | 4,339,036,780,527,847,400 | 27.453333 | 72 | 0.73477 | false |
abhikeshav/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_controller_optics_oper.py | 1 | 95722 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'OpticsFormFactorEnum' : _MetaInfoEnum('OpticsFormFactorEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper',
{
'not-set':'NOT_SET',
'invalid':'INVALID',
'cpak':'CPAK',
'cxp':'CXP',
'sfp-plus':'SFP_PLUS',
'qsfp':'QSFP',
'qsfp-plus':'QSFP_PLUS',
'qsfp28':'QSFP28',
}, 'Cisco-IOS-XR-controller-optics-oper', _yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper']),
'OpticsControllerStateEnum' : _MetaInfoEnum('OpticsControllerStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper',
{
'optics-state-up':'OPTICS_STATE_UP',
'optics-state-down':'OPTICS_STATE_DOWN',
'optics-state-admin-down':'OPTICS_STATE_ADMIN_DOWN',
}, 'Cisco-IOS-XR-controller-optics-oper', _yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper']),
'OpticsLaserStateEnum' : _MetaInfoEnum('OpticsLaserStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper',
{
'on':'ON',
'off':'OFF',
'unknown':'UNKNOWN',
}, 'Cisco-IOS-XR-controller-optics-oper', _yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper']),
'OpticsPhyEnum' : _MetaInfoEnum('OpticsPhyEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper',
{
'not-set':'NOT_SET',
'invalid':'INVALID',
'long-reach-four-lanes':'LONG_REACH_FOUR_LANES',
'short-reach-ten-lanes':'SHORT_REACH_TEN_LANES',
'short-reach-one-lane':'SHORT_REACH_ONE_LANE',
'long-reach-one-lane':'LONG_REACH_ONE_LANE',
'short-reach-four-lanes':'SHORT_REACH_FOUR_LANES',
'copper-four-lanes':'COPPER_FOUR_LANES',
'active-optical-cable':'ACTIVE_OPTICAL_CABLE',
'fourty-gig-e-long-reach-four-lanes':'FOURTY_GIG_E_LONG_REACH_FOUR_LANES',
'fourty-gig-e-short-reach-four-lanes':'FOURTY_GIG_E_SHORT_REACH_FOUR_LANES',
'cwdm-four-lanes':'CWDM_FOUR_LANES',
}, 'Cisco-IOS-XR-controller-optics-oper', _yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper']),
'OpticsTasEnum' : _MetaInfoEnum('OpticsTasEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper',
{
'tas-ui-oos':'TAS_UI_OOS',
'tas-ui-main':'TAS_UI_MAIN',
'tas-ui-is':'TAS_UI_IS',
'tas-ui-ains':'TAS_UI_AINS',
}, 'Cisco-IOS-XR-controller-optics-oper', _yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper']),
'OpticsEnum' : _MetaInfoEnum('OpticsEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper',
{
'optics-unknown':'OPTICS_UNKNOWN',
'optics-grey':'OPTICS_GREY',
'optics-dwdm':'OPTICS_DWDM',
'optics-cwdm':'OPTICS_CWDM',
}, 'Cisco-IOS-XR-controller-optics-oper', _yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper']),
'OpticsLedStateEnum' : _MetaInfoEnum('OpticsLedStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper',
{
'off':'OFF',
'green-on':'GREEN_ON',
'green-flashing':'GREEN_FLASHING',
'yellow-on':'YELLOW_ON',
'yellow-flashing':'YELLOW_FLASHING',
'red-on':'RED_ON',
'red-flashing':'RED_FLASHING',
}, 'Cisco-IOS-XR-controller-optics-oper', _yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper']),
'OpticsOper.OpticsPorts.OpticsPort.OpticsDwdmCarrrierChannelMap.DwdmCarrierMapInfo' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsDwdmCarrrierChannelMap.DwdmCarrierMapInfo',
False,
[
_MetaInfoClassMember('frequency', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Frequency
''',
'frequency',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('g694-chan-num', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' G694 channel number
''',
'g694_chan_num',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('itu-chan-num', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ITU channel number
''',
'itu_chan_num',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('wavelength', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Wavelength
''',
'wavelength',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'dwdm-carrier-map-info',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsDwdmCarrrierChannelMap' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsDwdmCarrrierChannelMap',
False,
[
_MetaInfoClassMember('dwdm-carrier-band', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' DWDM carrier band
''',
'dwdm_carrier_band',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('dwdm-carrier-map-info', REFERENCE_LIST, 'DwdmCarrierMapInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsDwdmCarrrierChannelMap.DwdmCarrierMapInfo',
[], [],
''' DWDM carrier mapping info
''',
'dwdm_carrier_map_info',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('dwdm-carrier-max', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Highest DWDM carrier supported
''',
'dwdm_carrier_max',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('dwdm-carrier-min', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Lowest DWDM carrier supported
''',
'dwdm_carrier_min',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'optics-dwdm-carrrier-channel-map',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.NetworkSrlgInfo' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.NetworkSrlgInfo',
False,
[
_MetaInfoClassMember('network-srlg', REFERENCE_LEAFLIST, 'int' , None, None,
[(0, 4294967295)], [],
''' Network Srlg
''',
'network_srlg',
'Cisco-IOS-XR-controller-optics-oper', False, max_elements=102),
],
'Cisco-IOS-XR-controller-optics-oper',
'network-srlg-info',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRxPower' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRxPower',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'high-rx-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRxPower' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRxPower',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'low-rx-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTxPower' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTxPower',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'high-tx-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTxPower' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTxPower',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'low-tx-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighLbc' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighLbc',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'high-lbc',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx1Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx1Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'high-rx1-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx2Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx2Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'high-rx2-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx3Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx3Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'high-rx3-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx4Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx4Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'high-rx4-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx1Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx1Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'low-rx1-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx2Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx2Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'low-rx2-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx3Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx3Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'low-rx3-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx4Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx4Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'low-rx4-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx1Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx1Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'high-tx1-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx2Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx2Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'high-tx2-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx3Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx3Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'high-tx3-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx4Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx4Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'high-tx4-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx1Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx1Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'low-tx1-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx2Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx2Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'low-tx2-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx3Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx3Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'low-tx3-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx4Power' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx4Power',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'low-tx4-power',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx1Lbc' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx1Lbc',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'high-tx1lbc',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx2Lbc' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx2Lbc',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'high-tx2lbc',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx3Lbc' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx3Lbc',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'high-tx3lbc',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx4Lbc' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx4Lbc',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'high-tx4lbc',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx1Lbc' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx1Lbc',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'low-tx1lbc',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx2Lbc' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx2Lbc',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'low-tx2lbc',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx3Lbc' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx3Lbc',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'low-tx3lbc',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx4Lbc' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx4Lbc',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'low-tx4lbc',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.RxLos' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.RxLos',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'rx-los',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.TxLos' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.TxLos',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'tx-los',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.RxLol' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.RxLol',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'rx-lol',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.TxLol' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.TxLol',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'tx-lol',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.TxFault' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.TxFault',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'tx-fault',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Hidgd' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Hidgd',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'hidgd',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Oorcd' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Oorcd',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'oorcd',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Osnr' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Osnr',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'osnr',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Wvlool' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Wvlool',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'wvlool',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Mea' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Mea',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'mea',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.ImpRemoval' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.ImpRemoval',
False,
[
_MetaInfoClassMember('counter', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Alarm counter
''',
'counter',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('is-detected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is defect detected?
''',
'is_detected',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'imp-removal',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo',
False,
[
_MetaInfoClassMember('hidgd', REFERENCE_CLASS, 'Hidgd' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Hidgd',
[], [],
''' HI DGD
''',
'hidgd',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('high-lbc', REFERENCE_CLASS, 'HighLbc' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighLbc',
[], [],
''' High laser bias current
''',
'high_lbc',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('high-rx1-power', REFERENCE_CLASS, 'HighRx1Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx1Power',
[], [],
''' High Rx1 Power
''',
'high_rx1_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('high-rx2-power', REFERENCE_CLASS, 'HighRx2Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx2Power',
[], [],
''' High Rx2 Power
''',
'high_rx2_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('high-rx3-power', REFERENCE_CLASS, 'HighRx3Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx3Power',
[], [],
''' High Rx3 Power
''',
'high_rx3_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('high-rx4-power', REFERENCE_CLASS, 'HighRx4Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx4Power',
[], [],
''' High Rx4 Power
''',
'high_rx4_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('high-rx-power', REFERENCE_CLASS, 'HighRxPower' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRxPower',
[], [],
''' High Rx Power
''',
'high_rx_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('high-tx1-power', REFERENCE_CLASS, 'HighTx1Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx1Power',
[], [],
''' High Tx1 Power
''',
'high_tx1_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('high-tx1lbc', REFERENCE_CLASS, 'HighTx1Lbc' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx1Lbc',
[], [],
''' High Tx1 laser bias current
''',
'high_tx1lbc',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('high-tx2-power', REFERENCE_CLASS, 'HighTx2Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx2Power',
[], [],
''' High Tx2 Power
''',
'high_tx2_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('high-tx2lbc', REFERENCE_CLASS, 'HighTx2Lbc' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx2Lbc',
[], [],
''' High Tx2 laser bias current
''',
'high_tx2lbc',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('high-tx3-power', REFERENCE_CLASS, 'HighTx3Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx3Power',
[], [],
''' High Tx3 Power
''',
'high_tx3_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('high-tx3lbc', REFERENCE_CLASS, 'HighTx3Lbc' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx3Lbc',
[], [],
''' High Tx3 laser bias current
''',
'high_tx3lbc',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('high-tx4-power', REFERENCE_CLASS, 'HighTx4Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx4Power',
[], [],
''' High Tx4 Power
''',
'high_tx4_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('high-tx4lbc', REFERENCE_CLASS, 'HighTx4Lbc' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx4Lbc',
[], [],
''' High Tx4 laser bias current
''',
'high_tx4lbc',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('high-tx-power', REFERENCE_CLASS, 'HighTxPower' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTxPower',
[], [],
''' High Tx Power
''',
'high_tx_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('imp-removal', REFERENCE_CLASS, 'ImpRemoval' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.ImpRemoval',
[], [],
''' IMPROPER REM
''',
'imp_removal',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('low-rx1-power', REFERENCE_CLASS, 'LowRx1Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx1Power',
[], [],
''' Low Rx1 Power
''',
'low_rx1_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('low-rx2-power', REFERENCE_CLASS, 'LowRx2Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx2Power',
[], [],
''' Low Rx2 Power
''',
'low_rx2_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('low-rx3-power', REFERENCE_CLASS, 'LowRx3Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx3Power',
[], [],
''' Low Rx3 Power
''',
'low_rx3_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('low-rx4-power', REFERENCE_CLASS, 'LowRx4Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx4Power',
[], [],
''' Low Rx4 Power
''',
'low_rx4_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('low-rx-power', REFERENCE_CLASS, 'LowRxPower' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRxPower',
[], [],
''' Low Rx Power
''',
'low_rx_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('low-tx1-power', REFERENCE_CLASS, 'LowTx1Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx1Power',
[], [],
''' Low Tx1 Power
''',
'low_tx1_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('low-tx1lbc', REFERENCE_CLASS, 'LowTx1Lbc' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx1Lbc',
[], [],
''' Low Tx1 laser bias current
''',
'low_tx1lbc',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('low-tx2-power', REFERENCE_CLASS, 'LowTx2Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx2Power',
[], [],
''' Low Tx2 Power
''',
'low_tx2_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('low-tx2lbc', REFERENCE_CLASS, 'LowTx2Lbc' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx2Lbc',
[], [],
''' Low Tx2 laser bias current
''',
'low_tx2lbc',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('low-tx3-power', REFERENCE_CLASS, 'LowTx3Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx3Power',
[], [],
''' Low Tx3 Power
''',
'low_tx3_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('low-tx3lbc', REFERENCE_CLASS, 'LowTx3Lbc' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx3Lbc',
[], [],
''' Low Tx3 laser bias current
''',
'low_tx3lbc',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('low-tx4-power', REFERENCE_CLASS, 'LowTx4Power' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx4Power',
[], [],
''' Low Tx4 Power
''',
'low_tx4_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('low-tx4lbc', REFERENCE_CLASS, 'LowTx4Lbc' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx4Lbc',
[], [],
''' Low Tx4 laser bias current
''',
'low_tx4lbc',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('low-tx-power', REFERENCE_CLASS, 'LowTxPower' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTxPower',
[], [],
''' Low Tx Power
''',
'low_tx_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('mea', REFERENCE_CLASS, 'Mea' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Mea',
[], [],
''' MEA
''',
'mea',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('oorcd', REFERENCE_CLASS, 'Oorcd' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Oorcd',
[], [],
''' OOR CD
''',
'oorcd',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('osnr', REFERENCE_CLASS, 'Osnr' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Osnr',
[], [],
''' OSNR
''',
'osnr',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('rx-lol', REFERENCE_CLASS, 'RxLol' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.RxLol',
[], [],
''' RX LOL
''',
'rx_lol',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('rx-los', REFERENCE_CLASS, 'RxLos' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.RxLos',
[], [],
''' RX LOS
''',
'rx_los',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('tx-fault', REFERENCE_CLASS, 'TxFault' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.TxFault',
[], [],
''' TX Fault
''',
'tx_fault',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('tx-lol', REFERENCE_CLASS, 'TxLol' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.TxLol',
[], [],
''' TX LOL
''',
'tx_lol',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('tx-los', REFERENCE_CLASS, 'TxLos' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.TxLos',
[], [],
''' TX LOS
''',
'tx_los',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('wvlool', REFERENCE_CLASS, 'Wvlool' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Wvlool',
[], [],
''' WVL OOL
''',
'wvlool',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'optics-alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.LaneData' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.LaneData',
False,
[
_MetaInfoClassMember('lane-index', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' The index number of the lane
''',
'lane_index',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('laser-bias-current-milli-amps', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Laser Bias Current in units of 0.01mA
''',
'laser_bias_current_milli_amps',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('laser-bias-current-percent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Laser Bias Current in units of 0.01%
''',
'laser_bias_current_percent',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('receive-power', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Transponder receive power in the unit of 0.01dBm
''',
'receive_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('transmit-power', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Transmit power in the unit of 0.01dBm
''',
'transmit_power',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'lane-data',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort.OpticsInfo',
False,
[
_MetaInfoClassMember('cd', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Chromatic Dispersion ps/nm
''',
'cd',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('cd-high-threshold', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Chromatic Dispersion high threshold ps/nm
''',
'cd_high_threshold',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('cd-low-threshold', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Chromatic Dispersion low threshold ps/nm
''',
'cd_low_threshold',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('cd-max', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Chromatic Dispersion Max ps/nm
''',
'cd_max',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('cd-min', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Chromatic Dispersion Min ps/nm
''',
'cd_min',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('cfg-tx-power', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Configured Tx power value
''',
'cfg_tx_power',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('controller-state', REFERENCE_ENUM_CLASS, 'OpticsControllerStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsControllerStateEnum',
[], [],
''' Optics controller state: Up, Down or
Administratively Down
''',
'controller_state',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('dgd-high-threshold', ATTRIBUTE, 'str' , None, None,
[], [],
''' DGD high threshold in 0.1 ps
''',
'dgd_high_threshold',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('differential-group-delay', ATTRIBUTE, 'str' , None, None,
[], [],
''' Differential Group Delay ps
''',
'differential_group_delay',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('display-volt-temp', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Display Volt/Temp ?
''',
'display_volt_temp',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('dwdm-carrier-band', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' DWDM Carrier Band information
''',
'dwdm_carrier_band',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('dwdm-carrier-channel', ATTRIBUTE, 'str' , None, None,
[], [],
''' Current ITU DWDM Carrier channel number
''',
'dwdm_carrier_channel',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('dwdm-carrier-frequency', ATTRIBUTE, 'str' , None, None,
[], [],
''' DWDM Carrier frequency read from hw in the unit
0.01THz
''',
'dwdm_carrier_frequency',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('dwdm-carrier-wavelength', ATTRIBUTE, 'str' , None, None,
[], [],
''' Wavelength of color optics 0.001nm
''',
'dwdm_carrier_wavelength',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('form-factor', REFERENCE_ENUM_CLASS, 'OpticsFormFactorEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsFormFactorEnum',
[], [],
''' Optics form factor
''',
'form_factor',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('grey-wavelength', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Wavelength of grey optics 0.01nm
''',
'grey_wavelength',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('lane-data', REFERENCE_LIST, 'LaneData' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.LaneData',
[], [],
''' Lane information
''',
'lane_data',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('laser-state', REFERENCE_ENUM_CLASS, 'OpticsLaserStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsLaserStateEnum',
[], [],
                ''' Showing laser state. Either ON or OFF or unknown
''',
'laser_state',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('lbc-high-threshold', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' LBC High threshold value
''',
'lbc_high_threshold',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('led-state', REFERENCE_ENUM_CLASS, 'OpticsLedStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsLedStateEnum',
[], [],
''' Showing Current Colour of led state
''',
'led_state',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('network-srlg-info', REFERENCE_CLASS, 'NetworkSrlgInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.NetworkSrlgInfo',
[], [],
''' Network SRLG information
''',
'network_srlg_info',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('optical-signal-to-noise-ratio', ATTRIBUTE, 'str' , None, None,
[], [],
''' Optical Signal to Noise Ratio dB
''',
'optical_signal_to_noise_ratio',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('optics-alarm-info', REFERENCE_CLASS, 'OpticsAlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo',
[], [],
''' Optics Alarm Information
''',
'optics_alarm_info',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('optics-module', ATTRIBUTE, 'str' , None, None,
[], [],
''' Optics module name
''',
'optics_module',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('optics-present', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is Optics Present?
''',
'optics_present',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('optics-type', REFERENCE_ENUM_CLASS, 'OpticsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsEnum',
[], [],
''' Optics type name
''',
'optics_type',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('osnr-low-threshold', ATTRIBUTE, 'str' , None, None,
[], [],
''' OSNR low threshold in 0.01 dB
''',
'osnr_low_threshold',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('phase-noise', ATTRIBUTE, 'str' , None, None,
[], [],
''' Phase Noise dB
''',
'phase_noise',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('phy-type', REFERENCE_ENUM_CLASS, 'OpticsPhyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsPhyEnum',
[], [],
''' Optics physical type
''',
'phy_type',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('pm-enable', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
                ''' PM Enable or Disable
''',
'pm_enable',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('polarization-change-rate', ATTRIBUTE, 'str' , None, None,
[], [],
''' Polarization Change Rate rad/s
''',
'polarization_change_rate',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('polarization-dependent-loss', ATTRIBUTE, 'str' , None, None,
[], [],
''' Polarization Dependent Loss dB
''',
'polarization_dependent_loss',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('polarization-mode-dispersion', ATTRIBUTE, 'str' , None, None,
[], [],
''' Polarization Mode Dispersion 0.1ps
''',
'polarization_mode_dispersion',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('rx-high-threshold', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Rx High threshold value
''',
'rx_high_threshold',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('rx-low-threshold', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Rx Low threshold value
''',
'rx_low_threshold',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('second-order-polarization-mode-dispersion', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Second Order Polarization Mode Dispersion 0.1ps^2
''',
'second_order_polarization_mode_dispersion',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('temp-high-threshold', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Temp High threshold value
''',
'temp_high_threshold',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('temp-low-threshold', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Temp Low threshold value
''',
'temp_low_threshold',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('transport-admin-state', REFERENCE_ENUM_CLASS, 'OpticsTasEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsTasEnum',
[], [],
''' Transport Admin State
''',
'transport_admin_state',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('tx-high-threshold', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Tx High threshold value
''',
'tx_high_threshold',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('tx-low-threshold', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Tx Low threshold value
''',
'tx_low_threshold',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('vendor-info', ATTRIBUTE, 'str' , None, None,
[], [],
''' Vendor Information
''',
'vendor_info',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('volt-high-threshold', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Volt High threshold value
''',
'volt_high_threshold',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('volt-low-threshold', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Volt Low threshold value
''',
'volt_low_threshold',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'optics-info',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts.OpticsPort' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts.OpticsPort',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Port name
''',
'name',
'Cisco-IOS-XR-controller-optics-oper', True),
_MetaInfoClassMember('optics-dwdm-carrrier-channel-map', REFERENCE_CLASS, 'OpticsDwdmCarrrierChannelMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsDwdmCarrrierChannelMap',
[], [],
''' Optics operational data
''',
'optics_dwdm_carrrier_channel_map',
'Cisco-IOS-XR-controller-optics-oper', False),
_MetaInfoClassMember('optics-info', REFERENCE_CLASS, 'OpticsInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort.OpticsInfo',
[], [],
''' Optics operational data
''',
'optics_info',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'optics-port',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper.OpticsPorts' : {
'meta_info' : _MetaInfoClass('OpticsOper.OpticsPorts',
False,
[
_MetaInfoClassMember('optics-port', REFERENCE_LIST, 'OpticsPort' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts.OpticsPort',
[], [],
''' Optics operational data
''',
'optics_port',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'optics-ports',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
'OpticsOper' : {
'meta_info' : _MetaInfoClass('OpticsOper',
False,
[
_MetaInfoClassMember('optics-ports', REFERENCE_CLASS, 'OpticsPorts' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper', 'OpticsOper.OpticsPorts',
[], [],
''' All Optics Port operational data
''',
'optics_ports',
'Cisco-IOS-XR-controller-optics-oper', False),
],
'Cisco-IOS-XR-controller-optics-oper',
'optics-oper',
_yang_ns._namespaces['Cisco-IOS-XR-controller-optics-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_controller_optics_oper'
),
},
}
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsDwdmCarrrierChannelMap.DwdmCarrierMapInfo']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsDwdmCarrrierChannelMap']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRxPower']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRxPower']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTxPower']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTxPower']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighLbc']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx1Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx2Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx3Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighRx4Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx1Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx2Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx3Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowRx4Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx1Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx2Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx3Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx4Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx1Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx2Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx3Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx4Power']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx1Lbc']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx2Lbc']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx3Lbc']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.HighTx4Lbc']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx1Lbc']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx2Lbc']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx3Lbc']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.LowTx4Lbc']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.RxLos']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.TxLos']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.RxLol']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.TxLol']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.TxFault']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Hidgd']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Oorcd']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Osnr']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Wvlool']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.Mea']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo.ImpRemoval']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.NetworkSrlgInfo']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.OpticsAlarmInfo']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo.LaneData']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsDwdmCarrrierChannelMap']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort.OpticsInfo']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts.OpticsPort']['meta_info']
_meta_table['OpticsOper.OpticsPorts.OpticsPort']['meta_info'].parent =_meta_table['OpticsOper.OpticsPorts']['meta_info']
_meta_table['OpticsOper.OpticsPorts']['meta_info'].parent =_meta_table['OpticsOper']['meta_info']
| apache-2.0 | 6,047,835,209,121,108,000 | 53.172043 | 271 | 0.538622 | false |
daStrauss/subsurface | src/superSolve/wrapCvxopt.py | 1 | 2619 | '''
Created on Jul 9, 2012
Copyright © 2013
The Board of Trustees of The Leland Stanford Junior University.
All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: dstrauss
'''
import cvxopt
from cvxopt import umfpack
import copy
import numpy as np
from cvxopt import lapack
# Note: the A arguments are expected to be scipy sparse matrices (anything that
# provides .tocoo()); this is not checked explicitly.
# from scipy import sparse
def linsolve(A,b):
    '''Solves the sparse system Ax = b with UMFPACK and returns x as a 1-D numpy array.'''
aLocal = A.tocoo()
AC = cvxopt.spmatrix(aLocal.data.tolist(),aLocal.row.tolist(), aLocal.col.tolist())
bLocal = cvxopt.matrix(copy.deepcopy(b))
umfpack.linsolve(AC,bLocal)
bLocal = np.array(bLocal).flatten()
return bLocal
def staticSolver(A):
'''Creates a routine for solving the matrix A --uses UMFPACK underneath'''
aLocal = A.tocoo()
AC = cvxopt.spmatrix(aLocal.data.tolist(),aLocal.row.tolist(), aLocal.col.tolist())
Fs = umfpack.symbolic(AC)
FA = umfpack.numeric(AC,Fs)
def Q( b ):
bLocal = cvxopt.matrix(copy.deepcopy(b))
umfpack.solve(AC,FA,bLocal)
bLocal = np.array(bLocal).flatten()
return bLocal
return Q
def createSymbolic(A):
''' returns a symbolic factorization object for later reuse'''
s = A.shape
aLocal = A.tocoo()
AC = cvxopt.spmatrix(aLocal.data.tolist(),aLocal.row.tolist(), aLocal.col.tolist(),s)
Fs = umfpack.symbolic(AC)
return Fs
def solveNumeric(A,b, Fs):
''' given a static Fs, or symbolic factorization of the matrix A, performs the numeric part '''
aLocal = A.tocoo()
s = A.shape
AC = cvxopt.spmatrix(aLocal.data.tolist(),aLocal.row.tolist(), aLocal.col.tolist(),s)
# Fs = umfpack.symbolic(AC)
FA = umfpack.numeric(AC,Fs)
bLocal = cvxopt.matrix(copy.deepcopy(b))
umfpack.solve(AC,FA,bLocal)
bLocal = np.array(bLocal).flatten()
return bLocal
def denseSolve(A,b):
''' solves an Ax = b matrix system with gesv'''
if isinstance(A,np.ndarray):
aLocal = cvxopt.matrix(A)
bLocal = cvxopt.matrix(b)
lapack.gesv(aLocal,bLocal)
return np.array(bLocal).flatten()
else:
return linsolve(A,b)
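# Illustrative usage sketch (not part of the original module): assumes scipy is
# available alongside cvxopt, builds a small sparse system and solves it with
# each of the helpers above.
if __name__ == '__main__':
    from scipy import sparse
    A_demo = sparse.csr_matrix(np.array([[4.0, 1.0, 0.0],
                                         [1.0, 4.0, 1.0],
                                         [0.0, 1.0, 4.0]]))
    b_demo = np.array([1.0, 2.0, 3.0])
    print(linsolve(A_demo, b_demo))               # factor and solve in one call
    solve = staticSolver(A_demo)                  # factor once, reuse for many right-hand sides
    print(solve(b_demo))
    Fs_demo = createSymbolic(A_demo)              # symbolic analysis, reusable for
    print(solveNumeric(A_demo, b_demo, Fs_demo))  # matrices with the same sparsity pattern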
| apache-2.0 | -7,317,539,029,297,520,000 | 30.926829 | 99 | 0.688694 | false |
rouxcode/django-filer-addons | filer_addons/tests/test_management_commands.py | 1 | 2189 | # -*- coding: utf-8 -*-
from django.test import TestCase
from filer.tests import create_superuser
from filer.models import File, Folder
from filer_addons.tests.utils import create_django_file
class ManagementCommandsTests(TestCase):
def setUp(self):
self.superuser = create_superuser()
self.client.login(username='admin', password='secret')
self.folder = Folder.objects.create(name='test')
self.another_folder = Folder.objects.create(name='test')
def tearDown(self):
self.delete_files()
for folder in Folder.objects.all():
folder.delete()
def delete_files(self):
for f in File.objects.all():
f.delete()
def create_file(self, **kwargs):
"""
        Create a single test file owned by the superuser.
        kwargs size: tuple, image dimensions
        kwargs name: filename
        :param kwargs: optional name and size overrides
        :return: the created filer File instance
"""
filename = 'file.jpg'
if kwargs.get('name', None):
filename = kwargs['name']
size = (50, 50, )
if kwargs.get('size', None):
size = kwargs['size']
django_file = create_django_file(filename=filename, size=size)
file_obj = File.objects.create(
owner=self.superuser,
original_filename=filename,
file=django_file,
)
file_obj.save()
return file_obj
# TODO: write more management command tests
def test_delete_thumbnails(self):
from django.core.management import call_command
call_command('filer_addons', 'delete_thumbnails', )
# import django
# from django.core.management import call_command
# if django.VERSION[:2] < (2, 1):
# call_command('filer_addons', 'delete_thumbnails', )
# else:
# call_command('filer_addons' )
# from filer_addons.filer_utils.management.commands import delete_thumbnails
# call_command(delete_thumbnails.Command(), )
# check for thumb dir not existing
    def test_unused_files_command(self):
        pass
    def test_orphaned_files_command(self):
        pass
    def test_import_existing_files_command(self):
        pass
| mit | -3,994,054,243,302,470,000 | 30.724638 | 88 | 0.602101 | false |
jsanmor/sm-status-log | docs/conf.py | 1 | 7848 | # -*- coding: utf-8 -*-
#
# smstatuslog_project documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 17 11:46:20 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'smstatuslog_project'
copyright = u'2014, ChangeMyName'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'smstatuslog_projectdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'smstatuslog_project.tex', u'smstatuslog_project Documentation',
u'ChangeToMyName', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'smstatuslog_project', u'smstatuslog_project Documentation',
[u'ChangeToMyName'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'smstatuslog_project', u'smstatuslog_project Documentation',
u'ChangeToMyName', 'smstatuslog_project', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| gpl-2.0 | -5,517,463,744,943,174,000 | 31.429752 | 80 | 0.707569 | false |
pmarti/pyclutter | examples/hello.py | 1 | 1992 | import sys
import clutter
class HelloClutter:
def __init__ (self, message):
self.stage = clutter.Stage()
self.stage.set_color(clutter.color_from_string('DarkSlateGrey'))
self.stage.set_size(800, 600)
self.stage.set_title('My First Clutter Application')
self.stage.connect('key-press-event', clutter.main_quit)
self.stage.connect('button-press-event',
self.on_button_press_event)
color = clutter.Color(0xff, 0xcc, 0xcc, 0xdd)
self.label = clutter.Text()
self.label.set_font_name('Mono 32')
self.label.set_text(message)
self.label.set_color(color)
(label_width, label_height) = self.label.get_size()
label_x = self.stage.get_width() - label_width - 50
label_y = self.stage.get_height() - label_height
self.label.set_position(label_x, label_y)
self.stage.add(self.label)
self.cursor = clutter.Rectangle()
self.cursor.set_color(color)
self.cursor.set_size(20, label_height)
cursor_x = self.stage.get_width() - 50
cursor_y = self.stage.get_height() - label_height
self.cursor.set_position(cursor_x, cursor_y)
self.stage.add(self.cursor)
self.timeline = clutter.Timeline(500)
self.timeline.set_loop(True)
alpha = clutter.Alpha(self.timeline, clutter.LINEAR)
self.behaviour = clutter.BehaviourOpacity(0xdd, 0, alpha)
self.behaviour.apply(self.cursor)
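        # The BehaviourOpacity driven by the looping 500 ms timeline fades the
        # rectangle between opacity 0xdd and 0, which makes the "cursor" blink.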
def on_button_press_event (self, stage, event):
print "mouse button %d pressed at (%d, %d)" % \
(event.button, event.x, event.y)
def run (self):
self.stage.show_all()
self.timeline.start()
clutter.main()
def main (args):
if args:
message = args[0]
else:
message = 'Hello, Clutter!'
app = HelloClutter(message)
app.run()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| lgpl-2.1 | -3,097,715,510,959,132,000 | 32.2 | 72 | 0.601908 | false |
AtalM2/iAtal | src/python/test_broken.py | 1 | 1071 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import elements
import enums
#globals
ended = False
compteur = 0
map_ = False
def init(aMap):
global map_
map_ = aMap
def robot_init():
global undergroundSensor
undergroundSensor = sensor(enums.Level.Underground, 1)
global greenActuator
greenActuator = actuator(enums.Level.Underground, 1, "bibi")
# python strategy
def strat():
print(undergroundSensor.activate())
greenActuator.activate()
print(undergroundSensor.activate())
global ended
ended = True
# used to tell whether the strategy has finished or not
def isEnded():
return ended;
#Defines a sensor.
class sensor:
def __init__(self,level_, range_):
self.map_ = map_
self.level_ = level_
self.range_ = range_
def activate(self):
return map_.getItem(self.level_ , self.range_)
#defines an actuator
class actuator:
def __init__(self, level_, range_,newContent_):
self.map_ = map_
self.level_ = level_
self.range_ = range_
self.newContent_ = newContent_
def activate(self):
self.map_.setItem(self.level_, self.range_, self.newContent_)
| gpl-3.0 | 7,224,220,530,610,876,000 | 19.557692 | 63 | 0.703461 | false |
3liz/QgisQuickOSMPlugin | quick_osm_processing/advanced/download_overpass.py | 1 | 3441 | """
/***************************************************************************
QuickOSM QGIS plugin
OSM Overpass API frontend
-------------------
begin : 2017-11-11
copyright : (C) 2017 by Etienne Trimaille
email : etienne dot trimaille at gmail dot com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# import codecs
# import re
# import processing
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from qgis.core import (
QgsProcessingParameterString,
QgsProcessingOutputFile,
)
class DownloadOverpassUrl(QgisAlgorithm):
URL = 'URL'
OUTPUT = 'OUTPUT'
def __init__(self):
super(DownloadOverpassUrl, self).__init__()
self.feedback = None
def group(self):
return self.tr('Advanced')
@staticmethod
def groupId():
return 'advanced'
@staticmethod
def name():
return 'downloadoverpassquery'
def displayName(self):
return self.tr('Download from Overpass')
def flags(self):
return super().flags() # | QgsProcessingAlgorithm.FlagHideFromToolbox
def shortHelpString(self):
return self.tr(
'Like the native QGIS File Downloader algorithm, this algorithm '
'will download an URL but it will also perform a OSM integrity '
'check at the end of the download.')
def initAlgorithm(self, config=None):
self.addParameter(
QgsProcessingParameterString(
self.URL, self.tr('URL, with the query encoded')))
self.addOutput(
QgsProcessingOutputFile(
self.OUTPUT, self.tr('Output')))
def processAlgorithm(self, parameters, context, feedback):
self.feedback = feedback
# url = self.parameterAsString(parameters, self.URL, context)
output = self.parameterAsFileOutput(parameters, self.OUTPUT, context)
# processing.run("native:filedownloader", {
# 'URL': url,
# 'OUTPUT': output,
# }, context=context, feedback=feedback)
# file_obj = codecs.open(self.result_path, 'r', 'utf-8')
# file_obj.seek(0, 2)
# fsize = file_obj.tell()
# file_obj.seek(max(fsize - 1024, 0), 0)
# lines = file_obj.readlines()
# file_obj.close()
#
# lines = lines[-10:] # Get last 10 lines
# timeout = '<remark> runtime error: Query timed out in "[a-z]+" ' \
# 'at line [\d]+ after ([\d]+) seconds. </remark>'
# if re.search(timeout, ''.join(lines)):
# raise QgsProcessingException(tr('Overpass API timeout'))
outputs = {
self.OUTPUT: output,
}
return outputs
| gpl-2.0 | 8,111,771,314,235,636,000 | 34.112245 | 78 | 0.503051 | false |
anselg/handy-scripts | latency/plot_histogram.py | 1 | 4785 | #! /usr/bin/env python
##########################################################################
# Import modules
##########################################################################
import sys
import os
import re
import h5py as h5
import numpy as np
import pandas as pd
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import operator
import subprocess
##########################################################################
# Parse command-line input; set global parameters
##########################################################################
if len(sys.argv) < 2:
    print("Give me a histogram data file")
    sys.exit(1)
else:
filename = sys.argv[1]
plotname = os.path.splitext(filename)[0] + ".svg"
plt.style.use('ggplot')
##########################################################################
# Define methods
##########################################################################
def runShellCommand(command):
output = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE).stdout.read().strip().decode()
return output
def getCpu():
command = "cat /proc/cpuinfo"
text = runShellCommand(command).split('\n')
procline = [line for line in text if re.search("model name", line)][0]
return procline.split(":")[1].strip()
def getGpu():
command = "lshw -numeric -C display"
text = runShellCommand(command).split('\n')
product = [line for line in text if re.search("product", line)]
vendor = [line for line in text if re.search("vendor", line)]
driver = [line for line in text if re.search("driver", line)]
if product and vendor and driver:
product = product[0].split("product:")[1].strip()
vendor = vendor[0].split("vendor:")[1].strip()
driver = driver[0].split("configuration:")[1].strip().split(" ")[
0].split("=")[1].strip()
return vendor, product, driver
else:
return "GPU vendor not found", "GPU model not found", "GPU driver not found"
def getDaq():
command = "lspci"
text = runShellCommand(command).split('\n')
daqline = [line for line in text if re.search("National", line)]
if daqline:
daqline = daqline[0]
return daqline.split(":")[2].strip()
else:
return "DAQ not found"
def getDistro():
command = "echo $(lsb_release -is) $(lsb_release -rs)"
return runShellCommand(command)
def getKernel():
command = "uname -r"
return runShellCommand(command)
def getHostname():
command = "uname -n"
return runShellCommand(command)
def unwrapHistogram(f):
f["Count"] = f["Count"] - 1
latencies = []
for idx, row in f.iterrows():
latencies.extend([row["Latency (us)"]] * int(row["Count"]))
df = pd.DataFrame(latencies, columns=["Latency (us)"])
return df
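# Example of what unwrapHistogram does (hypothetical numbers): a row with
# Latency (us)=12 and Count=4 is first decremented to Count=3 (the function
# subtracts 1 from every count) and then expanded into three rows of 12, so the
# result behaves like raw latency samples for the hist()/mean()/std() calls below.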
##########################################################################
# Process data
##########################################################################
#filename = "test_rt_histdata_4.1.18_30min.txt"
raw_data = pd.read_csv(
filename,
sep=" ",
comment="#",
names=[
"Latency (us)",
"Count"])
data = unwrapHistogram(raw_data.copy(deep=True))
##########################################################################
# Generate table
##########################################################################
cpu = getCpu()
daq = getDaq()
hostname = getHostname()
distro = getDistro()
kernel = getKernel()
vendor, model, driver = getGpu()
frequency = 10.0
col1 = [
"Computer",
"Kernel",
"CPU",
"GPU Vendor",
"GPU Model",
"GPU Driver",
"RT Freq"]
col2 = [
hostname + " (" + distro + ")",
kernel,
cpu,
vendor,
model,
driver,
str(frequency) + " kHz"]
col2 = [[value] for value in col2]
##########################################################################
# Generate plot
##########################################################################
f, ax = plt.subplots(
2, gridspec_kw={
'height_ratios': [
1, 2.5]}, figsize=(
8, 7))
ax[0].axis('off')
table = ax[0].table(cellText=col2, rowLabels=col1, loc='center',
colWidths=[.8], colLoc='right', bbox=[.1, 0, .85, 1])
data.hist("Latency (us)", bins=50, ax=ax[1])
ax[1].set_title("")
ax[1].set_yscale('log')
ax[1].set_ylabel('Count')
ax[1].set_xlabel('Latency (us)')
mean_latency = data['Latency (us)'].mean()
std_latency = data['Latency (us)'].std()
ax[1].table(
cellText=[
[mean_latency],
[std_latency]],
rowLabels=[
"Mean (us)",
"Std Dev (us)"],
loc='center right',
colWidths=[.2] * 2)
plt.tight_layout()
plt.savefig(plotname, dpi=300)
plt.close()
| gpl-3.0 | 245,206,177,885,361,280 | 25.731844 | 84 | 0.491118 | false |
mivade/qCamera | qcamera/camprops.py | 1 | 5831 | """Camera properties"""
import os.path
import json
# TODO: don't allow updating of properties that don't exist in the
# default self.props set in __init__
from . exceptions import CameraPropertiesError
PATH = os.path.split(os.path.abspath(__file__))[0]
class CameraProperties(object):
"""Class used for storing properties of the camera in use and
flags about what functionality is supported.
"""
# Basic functions
# -------------------------------------------------------------------------
def __init__(self, filename=None, **kwargs):
"""Without kwargs passed, populate the base properties
dict. Otherwise, populate as appropriate. See self.props for
valid keyword arguments.
Parameters
----------
filename : str or None
If passed, the path to a JSON file that sets all the
camera properties.
"""
self.props = {
# Generic properties
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Number of horizontal and vertical pixels
'pixels': [0, 0],
# Pixel size in um. 0 means N/A
'pixel_um': 0,
# Bits per pixel. This could conceivably be a tuple if a
# color camera. The pixel_mode attribute specifies if it
# is mono or some form of color.
'depth': 8,
'pixel_mode': 'mono',
# Available trigger modes
'trigger_modes': ['internal'],
# Available acquisition modes
'acquisition_modes': ['continuous'],
# List of valid values for binning
'bins': [1],
# Min and max temperatures for cameras with a
# cooler. Meaningless otherwise.
'temp_range': [-90, 30],
# Min and max values for gain. Meaningless if the camera
# gain cannot be adjusted.
'gain_range': [0, 255],
# Min and max values for exposure. Meaningless if the camera
# exposure cannot be adjusted. For some cameras this has units
# for others these are an arbitrary units.
'exposure_range': [1,2000],
# Functionality flags
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Can hardware cropping be set?
'hardware_crop': False,
# Can the gain be adjusted?
'gain_adjust': False,
# Can the exposure be adjusted?
'exposure_adjust': True,
# Is there a built-in tempurature controller for the
# sensor?
'temp_control': False,
# Does the camera have a builtin shutter?
'shutter': False,
# Default settings
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Minimum and maximum threshold values for contrast adjustment
'init_contrast': [0, 256],
# Start acquisition immediately if True
'auto_start': True,
# Initial temperature set point
'init_set_point': -10,
# Start temperature control immediately?
'auto_temp_control': False,
# Initial shutter state open?
'init_shutter': False,
# Initial gain
'init_gain': 0,
# Initial exposure
'init_exposure': 100,
}
# Update parameters from a file if given.
if filename is not None:
self.load(filename)
else:
print("No camera properties loaded!")
def __getitem__(self, key):
return self.props[key]
def __setitem__(self, key, value):
self.props[key] = value
def __delitem__(self, key):
        self.props.pop(key)
def __iter__(self):
        return iter(self.props)
def __str__(self):
return json.dumps(self.props, indent=2)
def update(self, props):
"""Update the props dict."""
assert isinstance(props, dict)
self.props.update(props)
# Loading and saving properties
# -------------------------------------------------------------------------
# Definitions of basic camera properties can be stored in a JSON
# file so that we only need to determine at runtime a few
# differing parameters that change depending on the specific model
# of camera being used. For example, the Andor SDK supports
# several different specific cameras, but some functionality
# depends on the physical camera being used. Most of the
# capabilities for all models is the same, however, and so these
# generic values are stored in a file and only the few that are
# camera-specific are queried for.
def save(self, filename):
"""Save the properties to a JSON file."""
with open(filename, 'w') as outfile:
json.dump(self.props, outfile, indent=4, sort_keys=True)
def load(self, filename, abs_path=False):
"""Load the properties from a JSON file. If abs_path is False,
load the file from the global properties directory (i.e.,
qcamera/props).
"""
if not abs_path:
path = os.path.join(PATH, 'props', filename)
else:
path = filename
with open(path, 'r') as infile:
props = json.load(infile)
# TODO: this should check that keys are valid!
self.props = props
if __name__ == "__main__":
props = CameraProperties()
props.save('test.json')
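    # Illustrative round trip (not part of the original file): reload the JSON we
    # just wrote, using abs_path=True so the cwd-relative filename is used as-is,
    # then tweak a couple of entries.
    reloaded = CameraProperties()
    reloaded.load('test.json', abs_path=True)
    reloaded['init_exposure'] = 50           # goes through __setitem__
    reloaded.update({'gain_adjust': True})   # bulk update of the props dict
    print(reloaded)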
| bsd-2-clause | 4,954,028,469,685,111,000 | 31.130682 | 79 | 0.513291 | false |
osantana/quickstartup | tests/website/tests.py | 1 | 2616 | import pytest
from django.test import override_settings
from django.urls import NoReverseMatch
from quickstartup.qs_pages.models import Page
from quickstartup.qs_pages.urlresolver import page_reverse
from ..base import TEMPLATES, check_contains, check_in_html, check_template_used
pytestmark = pytest.mark.django_db
@override_settings(TEMPLATES=TEMPLATES)
def test_success_reverse():
Page.objects.create(slug="about", template_name="about.html")
url = page_reverse("about")
assert "/about/" == url
def test_fail_reverse_missing_page():
with pytest.raises(NoReverseMatch):
page_reverse("unknown")
def test_fail_reverse_invalid_url():
with pytest.raises(NoReverseMatch):
page_reverse("/")
def test_bootstrap_pages():
assert Page.objects.get(slug="").get_absolute_url() == "/"
assert Page.objects.get(slug="terms").get_absolute_url() == "/terms/"
assert Page.objects.get(slug="privacy").get_absolute_url() == "/privacy/"
def test_path():
page = Page.objects.get(slug="terms")
assert page.path == "/terms/"
assert str(page) == "/terms/"
def test_filter_invalid_pages():
pages = Page.objects.all()
assert "inv@lid" not in pages
def test_success_terms_page_access(client):
response = client.get("/terms/")
assert response.status_code == 200
assert check_contains(response, "<title>Terms of Service —")
def test_success_terms_page_access_missing_trailing_slash(client):
response = client.get("/terms")
assert check_contains(response, "<title>Terms of Service — ")
def test_success_privacy_page_access(client):
response = client.get("/privacy/")
assert response.status_code == 200
assert check_contains(response, "<title>Privacy Policy —")
def test_fail_page_404(client):
response = client.get("/unknown/")
assert response.status_code == 404
def test_fail_invalid_url(client):
response = client.get("/err/or/")
assert response.status_code == 404
@override_settings(TEMPLATES=TEMPLATES, DEBUG=False)
def test_call_template_with_error_and_debug_disabled(client):
Page.objects.create(slug="buggy-template", template_name="buggy-template.html")
response = client.get(page_reverse("buggy-template"))
assert response.status_code == 404 # original error is 404 because we dont map pages urls
def test_index_page_anonymous_user(client):
response = client.get("/")
assert response.status_code == 200
assert check_template_used(response, "website/landing.html")
assert check_in_html("<title>Django Quickstartup</title>", response.content.decode("utf-8"))
| mit | -3,780,522,163,532,078,000 | 30.071429 | 96 | 0.708046 | false |
prodromou87/gem5 | configs/example/se.py | 1 | 8904 | # Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Simple test script
#
# "m5 test.py"
import optparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../common')
addToPath('../ruby')
addToPath('../topologies')
import Options
import Ruby
import Simulation
import CacheConfig
import MemConfig
from Caches import *
from cpu2000 import *
def get_processes(options):
"""Interprets provided options and returns a list of processes"""
multiprocesses = []
inputs = []
outputs = []
errouts = []
pargs = []
workloads = options.cmd.split(';')
if options.input != "":
inputs = options.input.split(';')
if options.output != "":
outputs = options.output.split(';')
if options.errout != "":
errouts = options.errout.split(';')
if options.options != "":
pargs = options.options.split(';')
idx = 0
for wrkld in workloads:
process = LiveProcess()
process.executable = wrkld
if len(pargs) > idx:
process.cmd = [wrkld] + pargs[idx].split()
else:
process.cmd = [wrkld]
if len(inputs) > idx:
process.input = inputs[idx]
if len(outputs) > idx:
process.output = outputs[idx]
if len(errouts) > idx:
process.errout = errouts[idx]
multiprocesses.append(process)
idx += 1
if options.smt:
assert(options.cpu_type == "detailed" or options.cpu_type == "inorder")
return multiprocesses, idx
else:
return multiprocesses, 1
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addSEOptions(parser)
if '--ruby' in sys.argv:
Ruby.define_options(parser)
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
multiprocesses = []
numThreads = 1
if options.bench:
apps = options.bench.split("-")
if len(apps) != options.num_cpus:
print "number of benchmarks not equal to set num_cpus!"
sys.exit(1)
for app in apps:
try:
if buildEnv['TARGET_ISA'] == 'alpha':
exec("workload = %s('alpha', 'tru64', 'ref')" % app)
else:
exec("workload = %s(buildEnv['TARGET_ISA'], 'linux', 'ref')" % app)
multiprocesses.append(workload.makeLiveProcess())
except:
print >>sys.stderr, "Unable to find workload for %s: %s" % (buildEnv['TARGET_ISA'], app)
sys.exit(1)
elif options.cmd:
multiprocesses, numThreads = get_processes(options)
else:
print >> sys.stderr, "No workload specified. Exiting!\n"
sys.exit(1)
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
CPUClass.numThreads = numThreads
MemClass = Simulation.setMemClass(options)
# Check -- do not allow SMT with multiple CPUs
if options.smt and options.num_cpus > 1:
fatal("You cannot use SMT with multiple CPUs!")
np = options.num_cpus
system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
mem_mode = test_mem_mode,
mem_ranges = [AddrRange(options.mem_size)],
cache_line_size = options.cacheline_size)
# Create a top-level voltage domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
# Create a source clock for the system and set the clock period
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = system.voltage_domain)
# Create a CPU voltage domain
system.cpu_voltage_domain = VoltageDomain()
# Create a separate clock domain for the CPUs
system.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
voltage_domain =
system.cpu_voltage_domain)
# All cpus belong to a common cpu_clk_domain, therefore running at a common
# frequency.
for cpu in system.cpu:
cpu.clk_domain = system.cpu_clk_domain
# Sanity check
if options.fastmem:
if CPUClass != AtomicSimpleCPU:
fatal("Fastmem can only be used with atomic CPU!")
if (options.caches or options.l2cache):
fatal("You cannot use fastmem in combination with caches!")
if options.simpoint_profile:
if not options.fastmem:
# Atomic CPU checked with fastmem option already
fatal("SimPoint generation should be done with atomic cpu and fastmem")
if np > 1:
fatal("SimPoint generation not supported with more than one CPUs")
for i in xrange(np):
if options.smt:
system.cpu[i].workload = multiprocesses
elif len(multiprocesses) == 1:
system.cpu[i].workload = multiprocesses[0]
else:
system.cpu[i].workload = multiprocesses[i]
if options.fastmem:
system.cpu[i].fastmem = True
if options.simpoint_profile:
system.cpu[i].simpoint_profile = True
system.cpu[i].simpoint_interval = options.simpoint_interval
if options.checker:
system.cpu[i].addCheckerCpu()
system.cpu[i].createThreads()
if options.ruby:
if not (options.cpu_type == "detailed" or options.cpu_type == "timing"):
print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
sys.exit(1)
# Set the option for physmem so that it is not allocated any space
system.physmem = MemClass(range=AddrRange(options.mem_size),
null = True)
options.use_map = True
Ruby.create_system(options, system)
assert(options.num_cpus == len(system.ruby._cpu_ruby_ports))
for i in xrange(np):
ruby_port = system.ruby._cpu_ruby_ports[i]
# Create the interrupt controller and connect its ports to Ruby
# Note that the interrupt controller is always present but only
# in x86 does it have message ports that need to be connected
system.cpu[i].createInterruptController()
# Connect the cpu's cache ports to Ruby
system.cpu[i].icache_port = ruby_port.slave
system.cpu[i].dcache_port = ruby_port.slave
if buildEnv['TARGET_ISA'] == 'x86':
system.cpu[i].interrupts.pio = ruby_port.master
system.cpu[i].interrupts.int_master = ruby_port.slave
system.cpu[i].interrupts.int_slave = ruby_port.master
system.cpu[i].itb.walker.port = ruby_port.slave
system.cpu[i].dtb.walker.port = ruby_port.slave
else:
system.membus = CoherentBus()
system.system_port = system.membus.slave
CacheConfig.config_cache(options, system)
MemConfig.config_mem(options, system)
root = Root(full_system = False, system = system)
Simulation.run(options, root, system, FutureClass)
| bsd-3-clause | 1,964,174,809,649,631,000 | 33.78125 | 100 | 0.677673 | false |
toobaz/pandas | scripts/tests/test_validate_docstrings.py | 1 | 37241 | import io
import random
import string
import textwrap
import pytest
import numpy as np
import pandas as pd
import validate_docstrings
validate_one = validate_docstrings.validate_one
class GoodDocStrings:
"""
Collection of good doc strings.
This class contains a lot of docstrings that should pass the validation
script without any errors.
"""
def plot(self, kind, color="blue", **kwargs):
"""
Generate a plot.
Render the data in the Series as a matplotlib plot of the
specified kind.
Parameters
----------
kind : str
Kind of matplotlib plot.
color : str, default 'blue'
Color name or rgb code.
**kwargs
These parameters will be passed to the matplotlib plotting
function.
"""
pass
def sample(self):
"""
Generate and return a random number.
The value is sampled from a continuous uniform distribution between
0 and 1.
Returns
-------
float
Random number generated.
"""
return random.random()
def random_letters(self):
"""
Generate and return a sequence of random letters.
The length of the returned string is also random, and is also
returned.
Returns
-------
length : int
Length of the returned string.
letters : str
String of random letters.
"""
length = random.randint(1, 10)
letters = "".join(random.sample(string.ascii_lowercase, length))
return length, letters
def sample_values(self):
"""
Generate an infinite sequence of random numbers.
The values are sampled from a continuous uniform distribution between
0 and 1.
Yields
------
float
Random number generated.
"""
while True:
yield random.random()
def head(self):
"""
Return the first 5 elements of the Series.
This function is mainly useful to preview the values of the
Series without displaying the whole of it.
Returns
-------
Series
Subset of the original series with the 5 first values.
See Also
--------
Series.tail : Return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n.
"""
return self.iloc[:5]
def head1(self, n=5):
"""
Return the first elements of the Series.
This function is mainly useful to preview the values of the
Series without displaying the whole of it.
Parameters
----------
n : int
Number of values to return.
Returns
-------
Series
Subset of the original series with the n first values.
See Also
--------
tail : Return the last n elements of the Series.
Examples
--------
>>> s = pd.Series(['Ant', 'Bear', 'Cow', 'Dog', 'Falcon'])
>>> s.head()
0 Ant
1 Bear
2 Cow
3 Dog
4 Falcon
dtype: object
With the `n` parameter, we can change the number of returned rows:
>>> s.head(n=3)
0 Ant
1 Bear
2 Cow
dtype: object
"""
return self.iloc[:n]
def contains(self, pat, case=True, na=np.nan):
"""
Return whether each value contains `pat`.
In this case, we are illustrating how to use sections, even
if the example is simple enough and does not require them.
Parameters
----------
pat : str
Pattern to check for within each element.
case : bool, default True
Whether check should be done with case sensitivity.
na : object, default np.nan
Fill value for missing data.
Examples
--------
>>> s = pd.Series(['Antelope', 'Lion', 'Zebra', np.nan])
>>> s.str.contains(pat='a')
0 False
1 False
2 True
3 NaN
dtype: object
**Case sensitivity**
With `case_sensitive` set to `False` we can match `a` with both
`a` and `A`:
>>> s.str.contains(pat='a', case=False)
0 True
1 False
2 True
3 NaN
dtype: object
**Missing values**
We can fill missing values in the output using the `na` parameter:
>>> s.str.contains(pat='a', na=False)
0 False
1 False
2 True
3 False
dtype: bool
"""
pass
def mode(self, axis, numeric_only):
"""
Ensure sphinx directives don't affect checks for trailing periods.
Parameters
----------
axis : str
Sentence ending in period, followed by single directive.
.. versionchanged:: 0.1.2
numeric_only : bool
Sentence ending in period, followed by multiple directives.
.. versionadded:: 0.1.2
.. deprecated:: 0.00.0
A multiline description,
which spans another line.
"""
pass
def good_imports(self):
"""
Ensure import other than numpy and pandas are fine.
Examples
--------
This example does not import pandas or import numpy.
>>> import datetime
>>> datetime.MAXYEAR
9999
"""
pass
def no_returns(self):
"""
Say hello and have no returns.
"""
pass
def empty_returns(self):
"""
Say hello and always return None.
Since this function never returns a value, this
docstring doesn't need a return section.
"""
def say_hello():
return "Hello World!"
say_hello()
if True:
return
else:
return None
class BadGenericDocStrings:
"""Everything here has a bad docstring
"""
def func(self):
"""Some function.
With several mistakes in the docstring.
It has a blank like after the signature `def func():`.
The text 'Some function' should go in the line after the
opening quotes of the docstring, not in the same line.
There is a blank line between the docstring and the first line
of code `foo = 1`.
The closing quotes should be in the next line, not in this one."""
foo = 1
bar = 2
return foo + bar
def astype(self, dtype):
"""
Casts Series type.
Verb in third-person of the present simple, should be infinitive.
"""
pass
def astype1(self, dtype):
"""
Method to cast Series type.
Does not start with verb.
"""
pass
def astype2(self, dtype):
"""
Cast Series type
Missing dot at the end.
"""
pass
def astype3(self, dtype):
"""
Cast Series type from its current type to the new type defined in
the parameter dtype.
Summary is too verbose and doesn't fit in a single line.
"""
pass
def two_linebreaks_between_sections(self, foo):
"""
Test linebreaks message GL03.
Note 2 blank lines before parameters section.
Parameters
----------
foo : str
Description of foo parameter.
"""
pass
def linebreak_at_end_of_docstring(self, foo):
"""
Test linebreaks message GL03.
Note extra blank line at end of docstring.
Parameters
----------
foo : str
Description of foo parameter.
"""
pass
def plot(self, kind, **kwargs):
"""
Generate a plot.
Render the data in the Series as a matplotlib plot of the
specified kind.
Note the blank line between the parameters title and the first
parameter. Also, note that after the name of the parameter `kind`
and before the colon, a space is missing.
Also, note that the parameter descriptions do not start with a
capital letter, and do not finish with a dot.
Finally, the `**kwargs` parameter is missing.
Parameters
----------
kind: str
kind of matplotlib plot
"""
pass
def method(self, foo=None, bar=None):
"""
A sample DataFrame method.
Do not import numpy and pandas.
Try to use meaningful data, when it makes the example easier
to understand.
Try to avoid positional arguments like in `df.method(1)`. They
can be alright if previously defined with a meaningful name,
like in `present_value(interest_rate)`, but avoid them otherwise.
When presenting the behavior with different parameters, do not place
all the calls one next to the other. Instead, add a short sentence
explaining what the example shows.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> df = pd.DataFrame(np.ones((3, 3)),
... columns=('a', 'b', 'c'))
>>> df.all(1)
0 True
1 True
2 True
dtype: bool
>>> df.all(bool_only=True)
Series([], dtype: bool)
"""
pass
def private_classes(self):
"""
This mentions NDFrame, which is not correct.
"""
def unknown_section(self):
"""
This section has an unknown section title.
Unknown Section
---------------
This should raise an error in the validation.
"""
def sections_in_wrong_order(self):
"""
This docstring has the sections in the wrong order.
Parameters
----------
name : str
This section is in the right position.
Examples
--------
>>> print('So far Examples is good, as it goes before Parameters')
So far Examples is good, as it goes before Parameters
See Also
--------
function : This should generate an error, as See Also needs to go
before Examples.
"""
def deprecation_in_wrong_order(self):
"""
This docstring has the deprecation warning in the wrong order.
This is the extended summary. The correct order should be
summary, deprecation warning, extended summary.
.. deprecated:: 1.0
This should generate an error as it needs to go before
extended summary.
"""
def method_wo_docstrings(self):
pass
class BadSummaries:
def wrong_line(self):
"""Exists on the wrong line"""
pass
def no_punctuation(self):
"""
Has the right line but forgets punctuation
"""
pass
def no_capitalization(self):
"""
provides a lowercase summary.
"""
pass
def no_infinitive(self):
"""
Started with a verb that is not infinitive.
"""
def multi_line(self):
"""
Extends beyond one line
which is not correct.
"""
def two_paragraph_multi_line(self):
"""
Extends beyond one line
which is not correct.
Extends beyond one line, which in itself is correct but the
previous short summary should still be an issue.
"""
class BadParameters:
"""
Everything here has a problem with its Parameters section.
"""
def missing_params(self, kind, **kwargs):
"""
Lacks kwargs in Parameters.
Parameters
----------
kind : str
Foo bar baz.
"""
def bad_colon_spacing(self, kind):
"""
Has bad spacing in the type line.
Parameters
----------
kind: str
Needs a space after kind.
"""
def no_description_period(self, kind):
"""
Forgets to add a period to the description.
Parameters
----------
kind : str
Doesn't end with a dot
"""
def no_description_period_with_directive(self, kind):
"""
Forgets to add a period, and also includes a directive.
Parameters
----------
kind : str
Doesn't end with a dot
.. versionadded:: 0.00.0
"""
def no_description_period_with_directives(self, kind):
"""
Forgets to add a period, and also includes multiple directives.
Parameters
----------
kind : str
Doesn't end with a dot
.. versionchanged:: 0.00.0
.. deprecated:: 0.00.0
"""
def parameter_capitalization(self, kind):
"""
Forgets to capitalize the description.
Parameters
----------
kind : str
this is not capitalized.
"""
def blank_lines(self, kind):
"""
Adds a blank line after the section header.
Parameters
----------
kind : str
Foo bar baz.
"""
pass
def integer_parameter(self, kind):
"""
Uses integer instead of int.
Parameters
----------
kind : integer
Foo bar baz.
"""
pass
def string_parameter(self, kind):
"""
Uses string instead of str.
Parameters
----------
kind : string
Foo bar baz.
"""
pass
def boolean_parameter(self, kind):
"""
Uses boolean instead of bool.
Parameters
----------
kind : boolean
Foo bar baz.
"""
pass
def list_incorrect_parameter_type(self, kind):
"""
Uses list of boolean instead of list of bool.
Parameters
----------
kind : list of boolean, integer, float or string
Foo bar baz.
"""
pass
class BadReturns:
def return_not_documented(self):
"""
Lacks section for Returns
"""
return "Hello world!"
def yield_not_documented(self):
"""
Lacks section for Yields
"""
yield "Hello world!"
def no_type(self):
"""
Returns documented but without type.
Returns
-------
Some value.
"""
return "Hello world!"
def no_description(self):
"""
        Provides type but no description.
Returns
-------
str
"""
return "Hello world!"
def no_punctuation(self):
"""
Provides type and description but no period.
Returns
-------
str
A nice greeting
"""
return "Hello world!"
def named_single_return(self):
"""
Provides name but returns only one value.
Returns
-------
s : str
A nice greeting.
"""
return "Hello world!"
def no_capitalization(self):
"""
Forgets capitalization in return values description.
Returns
-------
foo : str
The first returned string.
bar : str
the second returned string.
"""
return "Hello", "World!"
def no_period_multi(self):
"""
Forgets period in return values description.
Returns
-------
foo : str
The first returned string
bar : str
The second returned string.
"""
return "Hello", "World!"
class BadSeeAlso:
def desc_no_period(self):
"""
Return the first 5 elements of the Series.
See Also
--------
Series.tail : Return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n
"""
pass
def desc_first_letter_lowercase(self):
"""
Return the first 5 elements of the Series.
See Also
--------
Series.tail : return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n.
"""
pass
def prefix_pandas(self):
"""
Have `pandas` prefix in See Also section.
See Also
--------
pandas.Series.rename : Alter Series index labels or name.
DataFrame.head : The first `n` rows of the caller object.
"""
pass
class BadExamples:
def unused_import(self):
"""
Examples
--------
>>> import pandas as pdf
>>> df = pd.DataFrame(np.ones((3, 3)), columns=('a', 'b', 'c'))
"""
pass
def missing_whitespace_around_arithmetic_operator(self):
"""
Examples
--------
>>> 2+5
7
"""
pass
def indentation_is_not_a_multiple_of_four(self):
"""
Examples
--------
>>> if 2 + 5:
... pass
"""
pass
def missing_whitespace_after_comma(self):
"""
Examples
--------
>>> df = pd.DataFrame(np.ones((3,3)),columns=('a','b', 'c'))
"""
pass
class TestValidator:
def _import_path(self, klass=None, func=None):
"""
Build the required import path for tests in this module.
Parameters
----------
klass : str
Class name of object in module.
func : str
Function name of object in module.
Returns
-------
str
Import path of specified object in this module
"""
base_path = "scripts.tests.test_validate_docstrings"
if klass:
base_path = ".".join([base_path, klass])
if func:
base_path = ".".join([base_path, func])
return base_path
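    # Illustrative result (using names defined in this module):
    #   self._import_path(klass="GoodDocStrings", func="plot")
    #   -> "scripts.tests.test_validate_docstrings.GoodDocStrings.plot"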
def test_good_class(self, capsys):
errors = validate_one(self._import_path(klass="GoodDocStrings"))["errors"]
assert isinstance(errors, list)
assert not errors
@pytest.mark.parametrize(
"func",
[
"plot",
"sample",
"random_letters",
"sample_values",
"head",
"head1",
"contains",
"mode",
"good_imports",
"no_returns",
"empty_returns",
],
)
def test_good_functions(self, capsys, func):
errors = validate_one(self._import_path(klass="GoodDocStrings", func=func))[
"errors"
]
assert isinstance(errors, list)
assert not errors
def test_bad_class(self, capsys):
errors = validate_one(self._import_path(klass="BadGenericDocStrings"))["errors"]
assert isinstance(errors, list)
assert errors
@pytest.mark.parametrize(
"func",
[
"func",
"astype",
"astype1",
"astype2",
"astype3",
"plot",
"method",
"private_classes",
],
)
def test_bad_generic_functions(self, capsys, func):
errors = validate_one(
self._import_path(klass="BadGenericDocStrings", func=func) # noqa:F821
)["errors"]
assert isinstance(errors, list)
assert errors
@pytest.mark.parametrize(
"klass,func,msgs",
[
# See Also tests
(
"BadGenericDocStrings",
"private_classes",
(
"Private classes (NDFrame) should not be mentioned in public "
"docstrings",
),
),
(
"BadGenericDocStrings",
"unknown_section",
('Found unknown section "Unknown Section".',),
),
(
"BadGenericDocStrings",
"sections_in_wrong_order",
(
"Sections are in the wrong order. Correct order is: Parameters, "
"See Also, Examples",
),
),
(
"BadGenericDocStrings",
"deprecation_in_wrong_order",
("Deprecation warning should precede extended summary",),
),
(
"BadSeeAlso",
"desc_no_period",
('Missing period at end of description for See Also "Series.iloc"',),
),
(
"BadSeeAlso",
"desc_first_letter_lowercase",
('should be capitalized for See Also "Series.tail"',),
),
# Summary tests
(
"BadSummaries",
"wrong_line",
("should start in the line immediately after the opening quotes",),
),
("BadSummaries", "no_punctuation", ("Summary does not end with a period",)),
(
"BadSummaries",
"no_capitalization",
("Summary does not start with a capital letter",),
),
(
"BadSummaries",
"no_capitalization",
("Summary must start with infinitive verb",),
),
("BadSummaries", "multi_line", ("Summary should fit in a single line",)),
(
"BadSummaries",
"two_paragraph_multi_line",
("Summary should fit in a single line",),
),
# Parameters tests
(
"BadParameters",
"missing_params",
("Parameters {**kwargs} not documented",),
),
(
"BadParameters",
"bad_colon_spacing",
(
'Parameter "kind" requires a space before the colon '
"separating the parameter name and type",
),
),
(
"BadParameters",
"no_description_period",
('Parameter "kind" description should finish with "."',),
),
(
"BadParameters",
"no_description_period_with_directive",
('Parameter "kind" description should finish with "."',),
),
(
"BadParameters",
"parameter_capitalization",
('Parameter "kind" description should start with a capital letter',),
),
(
"BadParameters",
"integer_parameter",
('Parameter "kind" type should use "int" instead of "integer"',),
),
(
"BadParameters",
"string_parameter",
('Parameter "kind" type should use "str" instead of "string"',),
),
(
"BadParameters",
"boolean_parameter",
('Parameter "kind" type should use "bool" instead of "boolean"',),
),
(
"BadParameters",
"list_incorrect_parameter_type",
('Parameter "kind" type should use "bool" instead of "boolean"',),
),
(
"BadParameters",
"list_incorrect_parameter_type",
('Parameter "kind" type should use "int" instead of "integer"',),
),
(
"BadParameters",
"list_incorrect_parameter_type",
('Parameter "kind" type should use "str" instead of "string"',),
),
pytest.param(
"BadParameters",
"blank_lines",
("No error yet?",),
marks=pytest.mark.xfail,
),
# Returns tests
("BadReturns", "return_not_documented", ("No Returns section found",)),
("BadReturns", "yield_not_documented", ("No Yields section found",)),
pytest.param("BadReturns", "no_type", ("foo",), marks=pytest.mark.xfail),
("BadReturns", "no_description", ("Return value has no description",)),
(
"BadReturns",
"no_punctuation",
('Return value description should finish with "."',),
),
(
"BadReturns",
"named_single_return",
(
"The first line of the Returns section should contain only the "
"type, unless multiple values are being returned",
),
),
(
"BadReturns",
"no_capitalization",
("Return value description should start with a capital " "letter",),
),
(
"BadReturns",
"no_period_multi",
('Return value description should finish with "."',),
),
# Examples tests
(
"BadGenericDocStrings",
"method",
("Do not import numpy, as it is imported automatically",),
),
(
"BadGenericDocStrings",
"method",
("Do not import pandas, as it is imported automatically",),
),
(
"BadGenericDocStrings",
"method_wo_docstrings",
("The object does not have a docstring",),
),
# See Also tests
(
"BadSeeAlso",
"prefix_pandas",
(
"pandas.Series.rename in `See Also` section "
"does not need `pandas` prefix",
),
),
# Examples tests
(
"BadExamples",
"unused_import",
("flake8 error: F401 'pandas as pdf' imported but unused",),
),
(
"BadExamples",
"indentation_is_not_a_multiple_of_four",
("flake8 error: E111 indentation is not a multiple of four",),
),
(
"BadExamples",
"missing_whitespace_around_arithmetic_operator",
(
"flake8 error: "
"E226 missing whitespace around arithmetic operator",
),
),
(
"BadExamples",
"missing_whitespace_after_comma",
("flake8 error: E231 missing whitespace after ',' (3 times)",),
),
(
"BadGenericDocStrings",
"two_linebreaks_between_sections",
(
"Double line break found; please use only one blank line to "
"separate sections or paragraphs, and do not leave blank lines "
"at the end of docstrings",
),
),
(
"BadGenericDocStrings",
"linebreak_at_end_of_docstring",
(
"Double line break found; please use only one blank line to "
"separate sections or paragraphs, and do not leave blank lines "
"at the end of docstrings",
),
),
],
)
def test_bad_docstrings(self, capsys, klass, func, msgs):
result = validate_one(self._import_path(klass=klass, func=func))
for msg in msgs:
assert msg in " ".join(err[1] for err in result["errors"])
def test_validate_all_ignore_deprecated(self, monkeypatch):
monkeypatch.setattr(
validate_docstrings,
"validate_one",
lambda func_name: {
"docstring": "docstring1",
"errors": [
("ER01", "err desc"),
("ER02", "err desc"),
("ER03", "err desc"),
],
"warnings": [],
"examples_errors": "",
"deprecated": True,
},
)
result = validate_docstrings.validate_all(prefix=None, ignore_deprecated=True)
assert len(result) == 0
class TestApiItems:
@property
def api_doc(self):
return io.StringIO(
textwrap.dedent(
"""
.. currentmodule:: itertools
Itertools
---------
Infinite
~~~~~~~~
.. autosummary::
cycle
count
Finite
~~~~~~
.. autosummary::
chain
.. currentmodule:: random
Random
------
All
~~~
.. autosummary::
seed
randint
"""
)
)
@pytest.mark.parametrize(
"idx,name",
[
(0, "itertools.cycle"),
(1, "itertools.count"),
(2, "itertools.chain"),
(3, "random.seed"),
(4, "random.randint"),
],
)
def test_item_name(self, idx, name):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert result[idx][0] == name
@pytest.mark.parametrize(
"idx,func",
[(0, "cycle"), (1, "count"), (2, "chain"), (3, "seed"), (4, "randint")],
)
def test_item_function(self, idx, func):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert callable(result[idx][1])
assert result[idx][1].__name__ == func
@pytest.mark.parametrize(
"idx,section",
[
(0, "Itertools"),
(1, "Itertools"),
(2, "Itertools"),
(3, "Random"),
(4, "Random"),
],
)
def test_item_section(self, idx, section):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert result[idx][2] == section
@pytest.mark.parametrize(
"idx,subsection",
[(0, "Infinite"), (1, "Infinite"), (2, "Finite"), (3, "All"), (4, "All")],
)
def test_item_subsection(self, idx, subsection):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert result[idx][3] == subsection
class TestDocstringClass:
@pytest.mark.parametrize(
"name, expected_obj",
[
("pandas.isnull", pd.isnull),
("pandas.DataFrame", pd.DataFrame),
("pandas.Series.sum", pd.Series.sum),
],
)
def test_resolves_class_name(self, name, expected_obj):
d = validate_docstrings.Docstring(name)
assert d.obj is expected_obj
@pytest.mark.parametrize("invalid_name", ["panda", "panda.DataFrame"])
def test_raises_for_invalid_module_name(self, invalid_name):
msg = 'No module can be imported from "{}"'.format(invalid_name)
with pytest.raises(ImportError, match=msg):
validate_docstrings.Docstring(invalid_name)
@pytest.mark.parametrize(
"invalid_name", ["pandas.BadClassName", "pandas.Series.bad_method_name"]
)
def test_raises_for_invalid_attribute_name(self, invalid_name):
name_components = invalid_name.split(".")
obj_name, invalid_attr_name = name_components[-2], name_components[-1]
msg = "'{}' has no attribute '{}'".format(obj_name, invalid_attr_name)
with pytest.raises(AttributeError, match=msg):
validate_docstrings.Docstring(invalid_name)
@pytest.mark.parametrize(
"name", ["pandas.Series.str.isdecimal", "pandas.Series.str.islower"]
)
def test_encode_content_write_to_file(self, name):
# GH25466
docstr = validate_docstrings.Docstring(name).validate_pep8()
# the list of pep8 errors should be empty
assert not list(docstr)
class TestMainFunction:
def test_exit_status_for_validate_one(self, monkeypatch):
monkeypatch.setattr(
validate_docstrings,
"validate_one",
lambda func_name: {
"docstring": "docstring1",
"errors": [
("ER01", "err desc"),
("ER02", "err desc"),
("ER03", "err desc"),
],
"warnings": [],
"examples_errors": "",
},
)
exit_status = validate_docstrings.main(
func_name="docstring1",
prefix=None,
errors=[],
output_format="default",
ignore_deprecated=False,
)
assert exit_status == 0
def test_exit_status_errors_for_validate_all(self, monkeypatch):
monkeypatch.setattr(
validate_docstrings,
"validate_all",
lambda prefix, ignore_deprecated=False: {
"docstring1": {
"errors": [
("ER01", "err desc"),
("ER02", "err desc"),
("ER03", "err desc"),
],
"file": "module1.py",
"file_line": 23,
},
"docstring2": {
"errors": [("ER04", "err desc"), ("ER05", "err desc")],
"file": "module2.py",
"file_line": 925,
},
},
)
exit_status = validate_docstrings.main(
func_name=None,
prefix=None,
errors=[],
output_format="default",
ignore_deprecated=False,
)
assert exit_status == 5
def test_no_exit_status_noerrors_for_validate_all(self, monkeypatch):
monkeypatch.setattr(
validate_docstrings,
"validate_all",
lambda prefix, ignore_deprecated=False: {
"docstring1": {"errors": [], "warnings": [("WN01", "warn desc")]},
"docstring2": {"errors": []},
},
)
exit_status = validate_docstrings.main(
func_name=None,
prefix=None,
errors=[],
output_format="default",
ignore_deprecated=False,
)
assert exit_status == 0
def test_exit_status_for_validate_all_json(self, monkeypatch):
print("EXECUTED")
monkeypatch.setattr(
validate_docstrings,
"validate_all",
lambda prefix, ignore_deprecated=False: {
"docstring1": {
"errors": [
("ER01", "err desc"),
("ER02", "err desc"),
("ER03", "err desc"),
]
},
"docstring2": {"errors": [("ER04", "err desc"), ("ER05", "err desc")]},
},
)
exit_status = validate_docstrings.main(
func_name=None,
prefix=None,
errors=[],
output_format="json",
ignore_deprecated=False,
)
assert exit_status == 0
def test_errors_param_filters_errors(self, monkeypatch):
monkeypatch.setattr(
validate_docstrings,
"validate_all",
lambda prefix, ignore_deprecated=False: {
"Series.foo": {
"errors": [
("ER01", "err desc"),
("ER02", "err desc"),
("ER03", "err desc"),
],
"file": "series.py",
"file_line": 142,
},
"DataFrame.bar": {
"errors": [("ER01", "err desc"), ("ER02", "err desc")],
"file": "frame.py",
"file_line": 598,
},
"Series.foobar": {
"errors": [("ER01", "err desc")],
"file": "series.py",
"file_line": 279,
},
},
)
exit_status = validate_docstrings.main(
func_name=None,
prefix=None,
errors=["ER01"],
output_format="default",
ignore_deprecated=False,
)
assert exit_status == 3
exit_status = validate_docstrings.main(
func_name=None,
prefix=None,
errors=["ER03"],
output_format="default",
ignore_deprecated=False,
)
assert exit_status == 1
| bsd-3-clause | -8,343,854,184,638,662,000 | 26.203068 | 88 | 0.481485 | false |
paulmartel/voltdb | tests/scripts/examples/sql_coverage/strings-schema.py | 1 | 1950 | #!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2016 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
{
"P1": {
"columns": (("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("ID", FastSerializer.VOLTTYPE_INTEGER),
("RATIO", FastSerializer.VOLTTYPE_FLOAT)),
"partitions": (),
"indexes": ("ID")
},
"R1": {
"columns": (("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("ID", FastSerializer.VOLTTYPE_INTEGER),
("RATIO", FastSerializer.VOLTTYPE_FLOAT)),
"partitions": (),
"indexes": ("ID")
}
}
| agpl-3.0 | -670,619,887,001,274,900 | 43.318182 | 73 | 0.657436 | false |
thedrow/cython | Cython/Compiler/Tests/TestJediTyper.py | 1 | 3980 | # -*- coding: utf-8 -*-
# tag: jedi
from __future__ import absolute_import
import unittest
from textwrap import dedent
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
from ..ParseTreeTransforms import NormalizeTree, InterpretCompilerDirectives
from .. import Main, Symtab, Visitor
from ...TestUtils import TransformTest
@contextmanager
def _tempfile(code):
code = dedent(code)
if isinstance(code, unicode):
code = code.encode('utf8')
with NamedTemporaryFile(suffix='.py') as f:
f.write(code)
f.seek(0)
yield f
def _test_typing(code, inject=False):
from ..JediTyper import analyse, inject_types
lines = []
with _tempfile(code) as f:
types = analyse(f.name)
if inject:
lines = inject_types(f.name, types)
return types, lines
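# Illustrative call (the code string is a placeholder):
#     types, lines = _test_typing("for i in range(3):\n    a = i + 1\n", inject=True)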
class DeclarationsFinder(Visitor.VisitorTransform):
directives = None
visit_Node = Visitor.VisitorTransform.recurse_to_children
def visit_CompilerDirectivesNode(self, node):
if not self.directives:
self.directives = []
self.directives.append(node)
self.visitchildren(node)
return node
class TestJediTyper(TransformTest):
def _test(self, code):
return _test_typing(code)[0]
def test_typing_global_int_loop(self):
code = '''\
for i in range(10):
a = i + 1
'''
types = self._test(code)
if not types:
# old Jedi version
return
self.assertIn((None, (1, 0)), types)
variables = types.pop((None, (1, 0)))
self.assertFalse(types)
self.assertEqual({'a': set(['int']), 'i': set(['int'])}, variables)
def test_typing_function_int_loop(self):
code = '''\
def func(x):
for i in range(x):
a = i + 1
return a
'''
types = self._test(code)
self.assertIn(('func', (1, 0)), types)
variables = types.pop(('func', (1, 0)))
self.assertFalse(types)
self.assertEqual({'a': set(['int']), 'i': set(['int'])}, variables)
def _test_conflicting_types_in_function(self):
code = '''\
def func(a, b):
print(a)
a = 1
b += a
a = 'abc'
return a, str(b)
print(func(1.5, 2))
'''
types = self._test(code)
self.assertIn(('func', (1, 0)), types)
variables = types.pop(('func', (1, 0)))
self.assertFalse(types)
self.assertEqual({'a': set(['int', 'str']), 'i': set(['int'])}, variables)
def _test_typing_function_char_loop(self):
code = '''\
def func(x):
l = []
for c in x:
l.append(c)
return l
print(func('abcdefg'))
'''
types = self._test(code)
self.assertIn(('func', (1, 0)), types)
variables = types.pop(('func', (1, 0)))
self.assertFalse(types)
self.assertEqual({'a': set(['int']), 'i': set(['int'])}, variables)
class TestTypeInjection(TestJediTyper):
"""
Subtype of TestJediTyper that additionally tests type injection and compilation.
"""
def setUp(self):
super(TestTypeInjection, self).setUp()
compilation_options = Main.CompilationOptions(Main.default_options)
ctx = compilation_options.create_context()
transform = InterpretCompilerDirectives(ctx, ctx.compiler_directives)
transform.module_scope = Symtab.ModuleScope('__main__', None, ctx)
self.declarations_finder = DeclarationsFinder()
self.pipeline = [NormalizeTree(None), transform, self.declarations_finder]
def _test(self, code):
types, lines = _test_typing(code, inject=True)
tree = self.run_pipeline(self.pipeline, ''.join(lines))
directives = self.declarations_finder.directives
# TODO: validate directives
return types
| apache-2.0 | -3,737,469,438,165,437,000 | 28.701493 | 84 | 0.576884 | false |
Gorah/py_accuracy_report | runreport.py | 1 | 34212 | #Script written for Python 2.7
#Dependencies to download: pyodbc (SQL Server Drivers)
import pyodbc
import datetime
import sys
import re
from contextlib import contextmanager
LATE_CASES = {}
@contextmanager
def get_connection():
"""
Connect to DB
"""
cnxn = pyodbc.connect('DRIVER={SQL SERVER};SERVER=BPOPLMCBC16;DATABASE=AdminTracker_SQL;UID=AppLogon;PWD=ZdrojHPL1950')
yield cnxn.cursor()
cnxn.commit()
def get_DBdata(sql, sD, eD, cursor):
"""
This function takes SQL string and connection object and returns
rowset with data
"""
if(sD):
cursor.execute(sql, sD, eD)
else:
cursor.execute(sql)
try:
rows = cursor.fetchall()
    except pyodbc.Error as err:
        rows = None
        print err
sys.exit(0)
return rows
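# Illustrative usage (SQL text and dates are placeholders):
#     with get_connection() as cursor:
#         rows = get_DBdata("SELECT ... WHERE T.DateReceived BETWEEN ? AND ?",
#                           '2014-01-01', '2014-01-31', cursor)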
def count_days(row, userecdate=False):
"""
This function calculates number of days between Cut Off Date and
date of receiving complete documents.
"""
cutD = row.CutOffDate
if userecdate:
recD = row.DateReceived
else:
#if CompleteDocsDate is missing, use current date instead
try:
recD = row.CompleteDocsDate
except AttributeError:
recD = datetime.datetime.now()
if not recD:
recD = datetime.datetime.now()
return day_diff(recD, cutD)
def day_diff(date1, date2):
"""
This function returns difference in days between 2 dates.
"""
days = (date1 - date2).days
if days > 0:
return days
else:
return 0
def write_to_dict(row, ttype, notes):
"""
This function fills dictionary with a new entry (which is another
dictionary containing all the necessary data)
"""
#new empty dictionary is created to store ticket data in it
case_descr = {}
case_descr['type'] = ttype
    #This allows overriding the default notes generated by the script
case_descr['notes'] = notes
if not row.Forname:
forename = ' '
else:
forename = row.Forname
case_descr['eename'] = row.Surname + ' ' + forename
case_descr['eeid'] = row.EEID
case_descr['rootcause'] = row.CauseText
#new dictionary is appended to general dict under ticket ID as key
LATE_CASES[row.ID] = case_descr
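# Illustrative shape of a LATE_CASES entry (all values are hypothetical):
#     LATE_CASES[1234] = {'type': 'Termination - Late Submission',
#                         'notes': '"Termination effective on ..."',
#                         'rootcause': 'Late notification by manager',
#                         'eename': 'Doe Jane',
#                         'eeid': 10001}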
def contract_exp_by_dates(sD, eD, cursor):
"""
This function takes date boundaries and connection object and
fetches data from DB. Then it sends recordsets for further
processing.
This function covers Contract Expiration - Late Submission category.
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname, E.Surname, T.SourceID,
R.CauseText, T.InRejComment
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID IN (262, 330)) AND
(T.DateReceived BETWEEN ? AND ?) AND
(T.EffectiveDate < T.DateReceived OR T.CutOffDate < T.DateReceived)"""
ttype = 'Contract Expiration - Late Renewal Submission'
#getting recordset from DB
result = get_DBdata(sql, sD, eD, cursor)
"""if there are any rows in response we're checking each row to
determine which piece of string to use in description of case.
After string is determined row and meta data are sent to be added
to dictionary.
"""
if result:
for row in result:
compDocs = get_compDocsString(row.CompleteDocsDate, row.InRejComment)
docs_rec = get_compDocsString(row.CompleteDocsDate)
notes = ('"%s%s.\n%s%s.\n%s.\n%s%s.\n%s%s.\n%s%d.\n%s."' %
('Contract End date ',
row.EffectiveDate.strftime('%d/%m/%Y'),
'PCR received on ',
row.DateReceived.strftime('%d/%m/%Y'),
docs_rec,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Days late for payroll cut off: ',
day_diff(datetime.datetime.now(), row.CutOffDate),
row.EEImpact
))
write_to_dict(row, ttype, notes)
def contract_no_response(sD, eD, cursor):
"""
This function finds records where there was no response for end
of contract reminder.
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname,
E.Surname, T.LetterSentOn, R.CauseText
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE T.ProcessID IN (352, 350, 383, 399) AND
(T.DateReceived BETWEEN ? AND ?) AND
((T.EffectiveDate < GETDATE() AND T.SignedLetterReceivedOn is null)
OR (T.CutOffDate < GETDATE() AND T.SignedLetterReceivedOn is null))"""
#getting data from DB
result = get_DBdata(sql, sD, eD, cursor)
if result:
for row in result:
if row.LetterSentOn:
letter = ('%s%s' %('Email to manager sent on ',
row.LetterSentOn.strftime('%d/%m/%Y')))
else:
letter = 'Email not sent yet'
notes = ('"%s%s.\n%s.\n%s%s.\n%s.\n%s%d.\n%s."' %
('Contract End date ',
row.EffectiveDate.strftime('%d/%m/%Y'),
letter,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Response not received from LM',
'Days late for payroll cut off: ',
day_diff(datetime.datetime.now(), row.CutOffDate),
row.EEImpact
))
write_to_dict(row, 'Contract Expiration - No Response', notes)
def contract_exp_by_letters(sD, eD, cursor):
"""
This function fetches data for Contract Expiration category,scoping
for the letter tickets. Late tickets are fetched and are split into
2 different types: 'no response' and 'late submission'.
Data is later sent to be written to dictionary
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname, T.SignedLetterReceivedOn,
E.Surname, T.LetterSentOn, R.CauseText FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID IN (349, 351, 352, 350, 383, 399)) AND
(T.DateReceived BETWEEN ? AND ?) AND
((T.EffectiveDate < GETDATE() AND T.SignedLetterRequired = 1)
OR (T.CutOffDate < GETDATE() AND T.SignedLetterRequired = 1))"""
notes_name = 'Contract End effective date '
#getting recordset from DB
result = get_DBdata(sql, sD, eD, cursor)
"""if there are any rows in response we're checking each row to
determine which piece of string to use in description of case.
After string is determined row and meta data are sent to be added
to dictionary.
"""
if result:
for row in result:
#############################
#TEMP STUFF - REMOVE IN PROD
if not row.LetterSentOn:
LetterSentOn = datetime.datetime(2010, 10, 10)
else:
LetterSentOn = row.LetterSentOn
###################################
if not row.SignedLetterReceivedOn:
SignedLetterReceivedOn = datetime.datetime.today()
else:
SignedLetterReceivedOn = row.SignedLetterReceivedOn
if row.LetterSentOn:
letter = ('%s%s' %('Email to manager sent on ',
row.LetterSentOn.strftime('%d/%m/%Y')))
else:
letter = 'Email not sent yet'
#create statuses of signed letter received back
#basing on date conditions
if row.SignedLetterReceivedOn:
sigLetter = ('%s%s' % ('Response from LM received on ',
# row.SignedLetterReceivedOn.strftime('%d/%m/%Y')))
SignedLetterReceivedOn.strftime('%d/%m/%Y')))
else:
sigLetter = 'Response from LM not yet returned'
ttype = 'Contract Expiration - Late Renewal Submission'
notes = ('"%s%s.\n%s.\n%s%s.\n%s.\n%s%d.\n%s."' %
('Contract End date ',
row.EffectiveDate.strftime('%d/%m/%Y'),
letter,
'Response should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
sigLetter,
'Days late for payroll cut off: ',
day_diff(SignedLetterReceivedOn, row.CutOffDate),
row.EEImpact
))
write_to_dict(row, ttype, notes)
def late_loa(sD, eD, cursor):
"""
This function finds late loa cases
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.EEImpact, E.EEID, E.Forname, E.Surname, P.ProcessName, T.SourceID, R.CauseText
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tProcess as P ON T.ProcessID = P.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID IN (246, 261, 264, 282, 284, 289, 305,
306, 326, 341)) AND
(T.DateReceived BETWEEN ? AND ?)"""
ttype = 'Leave of Absence - Late Submission'
#getting recordset from DB
result = get_DBdata(sql, sD, eD, cursor)
#if there are any records in the recordset they need to be analized if
#they are late.
if result:
for row in result:
#checks if row is late. if yes adds an entry
if check_if_late_loa(row):
source = get_source_string(row.SourceID)
friday = row.EffectiveDate + datetime.timedelta(days=(4 - row.EffectiveDate.weekday()))
notes = ('"%s%s.\n%s%s.\n%s%s.\n%s%s.\n%s%d.\n%s"' %
('Process type: ',
row.ProcessName,
'Effective date ',
row.EffectiveDate.strftime('%d/%m/%Y'),
'Request should be submitted by ',
friday.strftime('%d/%m/%Y'),
source,
row.DateReceived.strftime('%d/%m/%Y'),
'Days late: ',
day_diff(row.DateReceived, friday),
row.EEImpact
))
write_to_dict(row, ttype, notes)
def check_if_late_loa(row):
"""
This function checks if loa entry is late or not based on business req.
"""
    #find how many days away the following Friday is from the effective date
diff = 4 - row.EffectiveDate.weekday()
fridayDate = row.EffectiveDate + datetime.timedelta(days=diff)
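    # Worked example (dates are hypothetical): an effective date of Wednesday
    # 2014-01-01 has weekday() == 2, so diff == 2 and fridayDate is Friday
    # 2014-01-03; anything received after that Friday is treated as late.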
#checks if date received is greater than date of Friday in the week when
#effective date took place
if (row.DateReceived - fridayDate).days > 0:
return True
else:
return False
def ret_from_loa_by_dates(sD, eD, cursor):
"""
This function collects data about Return From LOA category and
sends records with late tickets to be added to dictionary.
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname, E.Surname, R.CauseText,
T.SourceID, T.InRejComment
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID = 325) AND
(T.DateReceived BETWEEN ? AND ?) AND (T.EffectiveDate < T.DateReceived)"""
ttype = 'Return from Leave - Late Submission'
#getting recordset from DB
result = get_DBdata(sql, sD, eD, cursor)
#if there are any records in the recordset each row is sent to be
#added to dictionary
if result:
for row in result:
source = get_source_string(row.SourceID)
            #make sure to use a date. If complete docs date is missing, today's date is used instead
compDocs = get_compDocsString(row.CompleteDocsDate, row.InRejComment)
dateRec = get_docsDate(row.CompleteDocsDate)
if (row.DateReceived - row.EffectiveDate).days > 0:
notes = ('"%s%s.\n%s%s.\n%s.\n%s%s.\n%s%d.\n%s"' %('Return effective on ',
row.EffectiveDate.strftime('%d/%m/%Y'),
source,
row.DateReceived.strftime('%d/%m/%Y'),
compDocs,
'Request should be submitted by ',
row.EffectiveDate.strftime('%d/%m/%Y'),
'Days late for payroll cut off: ',
day_diff(dateRec, row.EffectiveDate),
row.EEImpact
))
write_to_dict(row, ttype, notes)
def late_by_action(sD, eD, scope, procname, cursor):
"""
This function finds late job change actions in SAP among tickets
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname, E.Surname , R.CauseText,
T.SourceID, T.InRejComment
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID IN (""" + scope + """) AND
T.DateReceived BETWEEN ? AND ?) AND
(((T.EffectiveDate < T.CompleteDocsDate) OR
(T.CutOffDate < T.CompleteDocsDate) AND T.CompleteDocsDate IS NOT NULL)
OR ((T.EffectiveDate < T.DateReceived OR T.CutOffDate < T.DateReceived) AND
T.CompleteDocsDate IS NULL))"""
ttype = procname + " - Late Submission"
#getting recordset from DB
result = get_DBdata(sql, sD, eD, cursor)
if result:
for row in result:
source = get_source_string(row.SourceID)
compDocs = get_compDocsString(row.CompleteDocsDate, row.InRejComment)
dateRec = get_docsDate(row.CompleteDocsDate)
notes = ('"%s%s.\n%s%s.\n%s.\n%s%s.\n%s%d.\n%s"' %
(procname + ' effective on ',
row.EffectiveDate.strftime('%d/%m/%Y'),
source,
row.DateReceived.strftime('%d/%m/%Y'),
compDocs,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Days late for payroll cut off: ',
day_diff(dateRec, row.CutOffDate),
row.EEImpact
))
write_to_dict(row, ttype, notes)
def late_by_letters(sD, eD, scope, procname, cursor):
"""
This function finds late job change letters
"""
sql = """SELECT T.ID, T.DateReceived, T.CompleteDocsDate, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.SignedLetterReceivedOn,
T.NumberOfReminders, E.EEID, E.Forname,
E.Surname, T.LetterReceived, T.SignedLetterRequired,
T.LetterSentOn, R.CauseText, T.SourceID, T.InRejComment
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID IN (""" + scope + """)) AND
(T.DateReceived BETWEEN ? AND ?) AND
((T.EffectiveDate < T.CompleteDocsDate) OR
(T.CutOffDate < T.CompleteDocsDate) OR
(T.EffectiveDate < T.SignedLetterReceivedOn AND T.SignedLetterRequired = 1
AND T.SignedLetterReceivedOn IS NOT NULL) OR
(T.CutOffDate < T.SignedLetterReceivedOn AND T.SignedLetterRequired = 1
AND T.SignedLetterReceivedOn IS NOT NULL) OR
(T.SignedLetterRequired = 1 AND T.SignedLetterReceivedOn IS NULL AND
T.EffectiveDate < GETDATE()) OR
(T.SignedLetterRequired = 1 AND T.SignedLetterReceivedOn IS NULL AND
T.CutOffDate < GETDATE()))"""
ttype = procname + " - Late Submission"
#grab recordset from DB
result = get_DBdata(sql, sD, eD, cursor)
if result:
for row in result:
#############################
#TEMP STUFF - REMOVE IN PROD
if not row.LetterSentOn:
LetterSentOn = datetime.datetime(2010, 10, 10)
else:
LetterSentOn = row.LetterSentOn
if not row.SignedLetterReceivedOn:
SignedLetterReceivedOn = datetime.datetime(2010, 10, 10)
else:
SignedLetterReceivedOn = row.SignedLetterReceivedOn
###################################
source = get_source_string(row.SourceID)
compDocs = get_compDocsString(row.CompleteDocsDate, row.InRejComment)
dateRec = get_docsDate(row.CompleteDocsDate)
#create statuses of signed letter received back
#basing on date conditions
if row.LetterReceived == 1 and row.SignedLetterReceivedOn:
sigLetter = ('%s%s' % ('Signed letter received on ',
#row.SignedLetterReceivedOn.strftime('%d/%m/%Y')))
SignedLetterReceivedOn.strftime('%d/%m/%Y')))
sigLetterRec = True
elif row.LetterReceived == 1 and row.SignedLetterRequired == 1 and not row.SignedLetterReceivedOn:
sigLetter = 'Signed letter not yet returned'
sigLetterRec = True
elif row.LetterReceived == 0:
sigLetterRec = False
#create statuses for letter sent, offer pack sent based on dates
if row.LetterReceived == 1:
letterSent = ('%s%s' % ('Letter sent on ',
#row.LetterSentOn.strftime('%d/%m/%Y')))
LetterSentOn.strftime('%d/%m/%Y')))
else:
letterSent = 'Letter not sent yet'
#calculate amount of days late basing on currenn document and contract statuses
#and on docs submission date
if row.CompleteDocsDate > row.CutOffDate:
days = day_diff(row.CompleteDocsDate, row.CutOffDate)
elif row.CompleteDocsDate > row.EffectiveDate:
days = day_diff(row.CompleteDocsDate, row.EffectiveDate)
if row.SignedLetterReceivedOn:
if row.SignedLetterReceivedOn > row.CutOffDate:
days = day_diff(row.SignedLetterReceivedOn, row.CutOffDate)
elif row.SignedLetterReceivedOn > row.EffectiveDate:
days = day_diff(row.SignedLetterReceivedOn, row.EffectiveDate)
#create notes field
if sigLetterRec:
notes = ('"%s%s.\n%s%s.\n%s.\n%s.\n%s.\n%s%s.\n%s%d.\n%s."' %
(procname + ' effective on ',
row.EffectiveDate.strftime('%d/%m/%Y'),
source,
row.DateReceived.strftime('%d/%m/%Y'),
compDocs,
letterSent,
sigLetter,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Days late for payroll cut off: ',
days,
row.EEImpact
))
else:
notes = ('"%s%s.\n%s%s.\n%s.\n%s.\n%s%s.\n%s%d.\n%s."' %
(procname + ' effective on ',
row.EffectiveDate.strftime('%d/%m/%Y'),
source,
row.DateReceived.strftime('%d/%m/%Y'),
compDocs,
letterSent,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Days late for payroll cut off: ',
days,
row.EEImpact
))
write_to_dict(row, ttype, notes)
def late_hire(sD, eD, cursor):
"""
This function finds late hire actions
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname, E.Surname , T.LetterReceived,
T.LetterSentOn, T.SignedLetterReceivedOn, T.CloseDate, R.CauseText, T.InRejComment
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID IN (371, 372) AND
(T.DateReceived BETWEEN ? AND ?)) AND
((T.EffectiveDate < T.DateReceived OR T.CutOffDate < T.DateReceived
AND T.CompleteDocsDate IS NULL) OR (T.SignedLetterReceivedOn > T.EffectiveDate)
OR (T.SignedLetterReceivedOn > T.CutOffDate) OR (T.CompleteDocsDate > T.EffectiveDate
OR T.CompleteDocsDate > T.CutOffDate) OR
(T.SignedLetterReceivedOn IS NULL AND (T.CutOffDate < GETDATE() OR
T.EffectiveDate < GETDATE())))"""
result = get_DBdata(sql, sD, eD, cursor)
ttype = 'Hires - Missing Documentation'
if result:
for row in result:
# if complete documents date is set use it as Complete docs received on
# else note that complete docs were not received yet
compDocs = get_compDocsString(row.CompleteDocsDate, row.InRejComment)
#create statuses of signed letter received back
#basing on date conditions
if row.LetterReceived == 1 and row.SignedLetterReceivedOn:
sigLetter = ('%s%s' % ('Signed contract received on ',
row.SignedLetterReceivedOn.strftime('%d/%m/%Y')))
sigLetterRec = True
elif row.LetterReceived == 1 and not row.SignedLetterReceivedOn:
sigLetter = 'Signed contract not yet returned'
sigLetterRec = True
elif row.LetterReceived == 0:
sigLetterRec = False
#create statuses for letter sent, offer pack sent based on dates
if row.CloseDate:
letterSent = ('%s%s' % ('Contract sent on ',
row.CloseDate.strftime('%d/%m/%Y')))
offPack = ('%s%s' % ('Offer pack sent on ',
row.CloseDate.strftime('%d/%m/%Y')))
else:
letterSent = 'Contract not sent yet'
offPack = 'Offer pack not sent yet'
#This checks if complete docs date has been filled in. If not,
#we can assume that complete documents are not yet provided and
#we are using current date instead.
if row.CompleteDocsDate:
docsRecDate = row.CompleteDocsDate
else:
docsRecDate = datetime.datetime.today()
#calculate amount of days late basing on currenn document and contract statuses
#and on docs submission date
if docsRecDate > row.CutOffDate:
days = day_diff(docsRecDate, row.CutOffDate)
elif docsRecDate > row.EffectiveDate:
days = day_diff(docsRecDate, row.EffectiveDate)
if row.SignedLetterReceivedOn:
if row.SignedLetterReceivedOn > row.CutOffDate:
days = day_diff(row.SignedLetterReceivedOn, row.CutOffDate)
elif row.SignedLetterReceivedOn > row.EffectiveDate:
days = day_diff(row.SignedLetterReceivedOn, row.EffectiveDate)
#create notes string
if sigLetterRec:
notes = ('"%s%s.\n%s.\n%s.\n%s.\n%s.\n%s%s.\n%s%d.\n%s"' %('New Hire effective on ',
row.EffectiveDate.strftime('%d/%m/%Y'),
compDocs,
letterSent,
offPack,
sigLetter,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Days late: ',
days,
row.EEImpact))
else:
notes = ('"%s%s.\n%s.\n%s.\n%s.\n%s%s.\n%s%d.\n%s"' %('New Hire effective on ',
row.EffectiveDate.strftime('%d/%m/%Y'),
compDocs,
letterSent,
offPack,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Days late: ',
days,
row.EEImpact))
#write result to dictionary
write_to_dict(row, ttype, notes)
def late_termination(sD, eD, cursor):
"""
This function finds late job change actions in SAP among tickets
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname, E.Surname , R.CauseText,
T.SourceID, T.InRejComment
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID IN (336, 337, 338) AND
T.DateReceived BETWEEN ? AND ?) AND
(((T.EffectiveDate < T.CompleteDocsDate) OR
(T.CutOffDate < T.CompleteDocsDate) AND T.CompleteDocsDate IS NOT NULL)
OR ((T.EffectiveDate < T.DateReceived OR T.CutOffDate < T.DateReceived) AND
T.CompleteDocsDate IS NULL))"""
ttype = "Termination - Late Submission"
#getting recordset from DB
result = get_DBdata(sql, sD, eD, cursor)
if result:
for row in result:
source = get_source_string(row.SourceID)
compDocs = get_compDocsString(row.CompleteDocsDate, row.InRejComment)
dateRec = get_docsDate(row.CompleteDocsDate)
notes = ('"%s%s.\n%s%s.\n%s.\n%s%s.\n%s%d.\n%s"' %
('Termination effective on ',
row.EffectiveDate.strftime('%d/%m/%Y'),
source,
row.DateReceived.strftime('%d/%m/%Y'),
compDocs,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Days late for payroll cut off: ',
day_diff(dateRec, row.CutOffDate),
row.EEImpact
))
write_to_dict(row, ttype, notes)
def termination_checklist_check(cursor):
"""
This function finds all unsubmitted termination checklists and
feeds them into dictionary.
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname,
E.Surname, T.LetterReceived, R.CauseText
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID = 417) AND (T.LetterReceived = 0)
AND (T.EffectiveDate < GETDATE()) AND (T.CurrentStatus <> 1)"""
ttype = 'Termination - No Termination Checklist submitted'
#getting recordset from DB
sD = None
eD = None
result = get_DBdata(sql, sD, eD, cursor)
#if there are any records in the recordset each row is sent to be
#added to dictionary
if result:
for row in result:
notes = ('Possible SOX audit compliance issue')
write_to_dict(row, ttype, notes)
def get_source_string(sourceID):
if sourceID == 2:
return 'PCR received on '
else:
return 'Non-PCR request received on'
def get_docsDate(compdate):
if compdate:
return compdate
else:
return datetime.datetime.today()
def get_compDocsString(compdate, details = None):
if details:
addcoments = (' (details about missing data: %s)' % (details))
else:
addcoments = ''
if compdate:
return ('%s%s%s' % ('Complete request received on ',
compdate.strftime('%d/%m/%Y'),
addcoments))
else:
return 'Complete documents still pending'
def write_to_file():
"""
This function saves report to csv file
"""
#Open file to save report to
report = open('report.csv', 'w')
for key in LATE_CASES:
#build file entry row from dict data
fileentry = '%d,%s,%s,%s,%s,%d' % (key, LATE_CASES[key]['type'],
LATE_CASES[key]['notes'],
LATE_CASES[key]['rootcause'],
LATE_CASES[key]['eename'],
LATE_CASES[key]['eeid'])
#write etry to file
report.write(fileentry + '\n')
#close the file
report.close()
def runReport(sD, eD):
with get_connection() as cursor:
#Contract Expiration section
contract_exp_by_dates(sD, eD, cursor)
contract_exp_by_letters(sD, eD, cursor)
contract_no_response(sD, eD, cursor)
#LOA section
late_loa(sD, eD, cursor)
#Return From LOA section
ret_from_loa_by_dates(sD, eD, cursor)
#Job Change section
#Job Changes action tickets
procname = "Job Change"
scope = "315, 331, 323, 335, 340, 339"
late_by_action(sD, eD, scope, procname, cursor)
#Job Changes letter tickets
scope = '363, 385, 386, 400, 410, 412, 413'
late_by_letters(sD, eD, scope, procname, cursor)
#New Hire section
late_hire(sD, eD, cursor)
#Pay Changes section
procname = 'Pay Change'
#Pay Changes action tickets
scope = '327, 328, 329'
late_by_action(sD, eD, scope, procname, cursor)
#Pay Changes letter tickets
scope = '395, 396, 397, 347'
late_by_letters(sD, eD, scope, procname, cursor)
#Termination section
procname = 'Termination'
#Termination actions
late_termination(sD, eD, cursor)
#Termination checklist
termination_checklist_check(cursor)
#Save the report to file
write_to_file()
if __name__ == '__main__':
"""
Program entry point.
    Command line arguments should contain the start and end dates in YYYY-MM-DD format
"""
#making sure that date will be passed and in correct format
if len(sys.argv) < 3:
print "Missing date, please pass it as an argument!"
sys.exit()
elif not re.match(r"\d{4}-\d{2}-\d{2}", sys.argv[1]):
print "Incorrect date format - should be YYYY-MM-DD"
sys.exit()
elif not re.match(r"\d{4}-\d{2}-\d{2}", sys.argv[2]):
print "Incorrect date format - should be YYYY-MM-DD"
sys.exit()
runReport(sys.argv[1], sys.argv[2])
| gpl-2.0 | -7,754,132,641,585,172,000 | 40.519417 | 123 | 0.512949 | false |
closeio/socketshark | socketshark/utils.py | 1 | 3506 | import asyncio
import ssl
import time
from urllib.parse import urlsplit, urlunsplit
import aiohttp
from . import constants as c
def _get_rate_limit_wait(log, resp, opts):
"""
Returns the number of seconds we should wait given a 429 HTTP response and
HTTP options.
"""
max_wait = 3600
wait = opts['wait']
header_name = opts['rate_limit_reset_header_name']
if header_name and header_name in resp.headers:
header_value = resp.headers[header_name]
try:
new_wait = float(header_value)
# Make sure we have a valid value (not negative, NaN, or Inf)
if 0 <= new_wait <= max_wait:
wait = new_wait
elif new_wait > max_wait:
log.warn(
'rate reset value too high',
name=header_name,
value=header_value,
)
wait = max_wait
else:
log.warn(
'invalid rate reset value',
name=header_name,
value=header_value,
)
except ValueError:
log.warn(
'invalid rate reset value',
name=header_name,
value=header_value,
)
return wait
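# Illustrative example (header name and values are assumptions): with
# opts = {'wait': 1.0, 'rate_limit_reset_header_name': 'X-RateLimit-Reset'}
# and a 429 response carrying 'X-RateLimit-Reset: 30', this returns 30.0;
# a missing or unparsable header falls back to opts['wait'].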
def _scrub_url(url):
"""Scrub URL username and password."""
url_parts = urlsplit(url)
if url_parts.password is None:
return url
else:
# url_parts tuple doesn't include password in _fields
# so can't easily use _replace to get rid of password
# and then call urlunsplit to reconstruct url.
_, _, hostinfo = url_parts.netloc.rpartition('@')
scrubbed_netloc = f'*****:*****@{hostinfo}'
scrubbed_url_parts = url_parts._replace(netloc=scrubbed_netloc)
return urlunsplit(scrubbed_url_parts)
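# Illustrative example (URL is hypothetical):
#     _scrub_url('https://user:secret@hooks.example.com/auth')
#     -> 'https://*****:*****@hooks.example.com/auth'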
async def http_post(shark, url, data):
log = shark.log.bind(url=_scrub_url(url))
opts = shark.config['HTTP']
if opts.get('ssl_cafile'):
ssl_context = ssl.create_default_context(cafile=opts['ssl_cafile'])
else:
ssl_context = None
conn = aiohttp.TCPConnector(ssl_context=ssl_context)
async with aiohttp.ClientSession(connector=conn) as session:
wait = opts['wait']
for n in range(opts['tries']):
if n > 0:
await asyncio.sleep(wait)
try:
start_time = time.time()
response_data = None
async with session.post(
url, json=data, timeout=opts['timeout']
) as resp:
if resp.status == 429: # Too many requests.
wait = _get_rate_limit_wait(log, resp, opts)
continue
else:
wait = opts['wait']
resp.raise_for_status()
response_data = await resp.json()
return response_data
except aiohttp.ClientError:
log.exception('unhandled exception in http_post')
except asyncio.TimeoutError:
log.exception('timeout in http_post')
finally:
log.debug(
'http request',
request=data,
response=response_data,
duration=time.time() - start_time,
)
return {'status': 'error', 'error': c.ERR_SERVICE_UNAVAILABLE}
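# Illustrative usage sketch (URL and payload are assumptions); must be awaited
# from a coroutine:
#     result = await http_post(shark, 'https://auth.example.com/check',
#                              {'ticket': 'abc123'})
#     if result.get('status') == 'error':
#         ...  # includes the c.ERR_SERVICE_UNAVAILABLE fallback above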
| mit | -1,969,585,453,855,249,400 | 32.711538 | 78 | 0.51911 | false |
Architizer/Feedly | feedly/serializers/cassandra/aggregated_activity_serializer.py | 1 | 1244 | from feedly.activity import AggregatedActivity
from feedly.serializers.aggregated_activity_serializer import AggregatedActivitySerializer
import pickle
class CassandraAggregatedActivitySerializer(AggregatedActivitySerializer):
def __init__(self, model):
self.model = model
def dumps(self, aggregated):
activities = pickle.dumps(aggregated.activities)
model_instance = self.model(
activity_id=long(aggregated.serialization_id),
activities=activities,
group=aggregated.group,
created_at=aggregated.created_at,
updated_at=aggregated.updated_at,
seen_at=aggregated.seen_at,
read_at=aggregated.read_at
)
return model_instance
def loads(self, serialized_aggregated):
activities = pickle.loads(serialized_aggregated.activities)
aggregated = AggregatedActivity(
group=serialized_aggregated.group,
activities=activities,
created_at=serialized_aggregated.created_at,
updated_at=serialized_aggregated.updated_at,
seen_at=serialized_aggregated.seen_at,
read_at=serialized_aggregated.read_at
)
return aggregated
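# Illustrative usage sketch (the Cassandra model class is an assumption):
#     serializer = CassandraAggregatedActivitySerializer(AggregatedActivityModel)
#     row = serializer.dumps(aggregated)        # Cassandra model instance
#     aggregated_back = serializer.loads(row)   # back to AggregatedActivity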
| bsd-3-clause | -4,981,306,401,176,553,000 | 35.588235 | 90 | 0.67283 | false |
tgquintela/pythonUtils | pythonUtils/tests.py | 1 | 1339 |
"""
tests
-----
The module which call and centralize all the tests utilities.
"""
from parallel_tools import test_parallel_tools
from Logger import test_logger
from TUI_tools import test_tui_tools
from CodingText import test_codingtext
from ProcessTools import test_processtools
from numpy_tools import test_numpytools
from ExploreDA import test_exploreDA
from MetricResults import test_metricresults
from CollectionMeasures import test_collectionmeasures
from Combinatorics import test_combinatorics
from sklearn_tools import test_sklearntools
from Perturbations import test_perturbations
from perturbation_tests import test_perturbationtests
from NeighsManager import test_neighsmanager
## Check administrative
import release
import version
## Not inform about warnings
import warnings
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
def test():
## Tests of modules
test_parallel_tools.test()
test_logger.test()
test_tui_tools.test()
test_codingtext.test()
test_numpytools.test()
test_processtools.test()
test_exploreDA.test()
test_metricresults.test()
test_collectionmeasures.test()
test_combinatorics.test()
test_perturbations.test()
test_sklearntools.test()
# test_perturbationtests.test()
test_neighsmanager.test()
| mit | 7,229,336,468,678,593,000 | 25.78 | 61 | 0.779686 | false |
Micutio/CAB_Simulations | SugarScape/ca/ss_cell.py | 1 | 1856 | """
Module containing the cell definition for the Sugarscape world.
"""
from cab.ca.cell import CellHex
__author__ = 'Michael Wagner'
__version__ = '1.0'
class WorldCell(CellHex):
def __init__(self, x, y, gc):
super().__init__(x, y, gc)
self.t_gen = None
self.sugar = 0
self.spice = 0
self.max_sugar = 0
self.max_spice = 0
self.growth_cycle = 3
self.growth_cycle_count = 0
self.state = False
def set_terrain_gen(self, tg):
self.t_gen = tg
self.sugar = int(self.t_gen.get(self.x, self.y))
self.spice = int(self.gc.MAX_SUGAR - self.sugar)
self.max_sugar = int(self.t_gen.get(self.x, self.y))
self.max_spice = int(self.gc.MAX_SUGAR - self.sugar)
# print("sugar: {0}, spice: {1}".format(self.sugar, self.spice))
def clone(self, x, y):
wc = WorldCell(x, y, self.gc)
wc.set_terrain_gen(self.t_gen)
return wc
def sense_neighborhood(self):
pass
def update(self):
if self.growth_cycle_count == self.growth_cycle:
if self.sugar < self.max_sugar:
self.sugar += 1
if self.spice < self.max_spice:
self.spice += 1
self.growth_cycle_count = 0
else:
self.growth_cycle_count += 1
self.calculate_cell_color()
def calculate_cell_color(self):
if self.max_sugar == 0:
normalized_su = 0
else:
normalized_su = self.sugar / self.gc.MAX_SUGAR
if self.max_spice == 0:
normalized_sp = 0
else:
normalized_sp = self.spice / self.gc.MAX_SUGAR
red = int(min(max(0, 150 * normalized_sp), 255))
green = int(min(max(0, 200 * normalized_su), 255))
blue = 0
self.color = (red, green, blue)
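        # Illustrative example (assumes gc.MAX_SUGAR == 4): a cell with sugar=4
        # and spice=0 normalizes to (1.0, 0.0) and gets color (0, 200, 0), i.e.
        # fully green; spice-rich cells shift toward red instead.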
| mit | 2,815,983,188,803,360,300 | 28.460317 | 72 | 0.542026 | false |