repo_name (stringlengths 5 to 92) | path (stringlengths 4 to 232) | copies (stringclasses, 19 values) | size (stringlengths 4 to 7) | content (stringlengths 721 to 1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 15 to 997) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
ninuxorg/netdiff | tests/test_batman.py | 1 | 4523 | import os
import networkx
from netdiff import BatmanParser, diff
from netdiff.exceptions import ParserError
from netdiff.tests import TestCase
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
iulinet = open('{0}/static/batman.json'.format(CURRENT_DIR)).read()
iulinet2 = open('{0}/static/batman-1+1.json'.format(CURRENT_DIR)).read()
duplicated = open('{0}/static/batman-duplicated.json'.format(CURRENT_DIR)).read()
class TestBatmanParser(TestCase):
""" tests for BatmanParser """
def test_parse(self):
p = BatmanParser(iulinet)
self.assertIsInstance(p.graph, networkx.Graph)
properties = list(p.graph.edges(data=True))[0][2]
self.assertIsInstance(properties['weight'], float)
# test additional properties in nodes of networkx graph
properties = list(p.graph.nodes(data=True))[0][1]
self.assertIsInstance(properties['local_addresses'], list)
self.assertIsInstance(properties['clients'], list)
def test_parse_exception(self):
with self.assertRaises(ParserError):
BatmanParser('{ "test": "test" }')
def test_parse_exception2(self):
with self.assertRaises(ParserError):
BatmanParser('{ "topology": [{ "a": "a" }] }')
def test_json_dict(self):
p = BatmanParser(iulinet)
data = p.json(dict=True)
self.assertIsInstance(data, dict)
self.assertEqual(data['type'], 'NetworkGraph')
self.assertEqual(data['protocol'], 'batman-adv')
self.assertEqual(data['version'], '2014.3.0')
self.assertEqual(data['metric'], 'TQ')
self.assertIsInstance(data['nodes'], list)
self.assertIsInstance(data['links'], list)
self.assertEqual(len(data['nodes']), 5)
self.assertEqual(len(data['links']), 4)
self.assertIsInstance(data['links'][0]['cost'], float)
# ensure additional node properties are present
found = False
for node in data['nodes']:
if node['id'] == '90:f6:52:f2:8c:2c':
self.assertIsInstance(node['local_addresses'], list)
self.assertIsInstance(node['properties']['clients'], list)
found = True
break
self.assertTrue(found)
found = False
for node in data['nodes']:
if node['id'] == 'a0:f3:c1:96:94:06':
found = True
break
self.assertTrue(found)
def test_json_string(self):
p = BatmanParser(iulinet)
data = p.json()
self.assertIsInstance(data, str)
self.assertIn('NetworkGraph', data)
self.assertIn('protocol', data)
self.assertIn('version', data)
self.assertIn('metric', data)
self.assertIn('batman-adv', data)
self.assertIn('2014.3.0', data)
self.assertIn('TQ', data)
self.assertIn('links', data)
self.assertIn('nodes', data)
def test_added_removed_1_node(self):
old = BatmanParser(iulinet)
new = BatmanParser(iulinet2)
result = diff(old, new)
self.assertIsInstance(result, dict)
self.assertTrue(type(result['added']['links']) is list)
self.assertTrue(type(result['removed']['links']) is list)
# ensure exactly 1 link was added and 1 link was removed
self.assertEqual(len(result['added']['links']), 1)
self.assertEqual(len(result['removed']['links']), 1)
self._test_expected_links(
graph=result['added'],
expected_links=[('a0:f3:c1:96:94:10', '90:f6:52:f2:8c:2c')],
)
self._test_expected_links(
graph=result['removed'],
expected_links=[('a0:f3:c1:96:94:06', '90:f6:52:f2:8c:2c')],
)
def test_no_changes(self):
old = BatmanParser(iulinet)
new = BatmanParser(iulinet)
result = diff(old, new)
self.assertIsInstance(result, dict)
self.assertIsNone(result['added'])
self.assertIsNone(result['removed'])
def test_duplicated(self):
nodup = BatmanParser(iulinet)
dup = BatmanParser(duplicated)
# nodup and dup have the same amount of nodes and edges
self.assertEqual(len(nodup.graph.edges()), len(dup.graph.edges()))
self.assertEqual(len(nodup.graph.nodes()), len(dup.graph.nodes()))
def test_get_primary_address_ValueError(self):
p = BatmanParser(iulinet)
r = p._get_primary_address('bb:aa:cc:dd:ee:ff', [['aa:bb:cc:dd:ee:ff']])
self.assertEqual(r, 'bb:aa:cc:dd:ee:ff')
| mit | -7,793,895,834,004,656,000 | 37.991379 | 81 | 0.608004 | false |
RyanChinSang/ECNG3020-ORSS4SCVI | BETA/dev02/test3.py | 1 | 4947 | from __future__ import print_function
from threading import Thread
import sys
import cv2
import numpy as np
import queue
from BETA.dev02.test2 import avg_color
from BETA.dev02.test4 import t2s_say
class VideoStream:
def __init__(self, src=None, height=None, width=None, ratio=None):
cv2.setUseOptimized(True)
if src is None:
camera_list = []
for i in range(10):
cap = cv2.VideoCapture(i)
if cap.isOpened():
camera_list += [i]
cap.release()
if len(camera_list) == 1:
src = camera_list[0]
elif len(camera_list) == 0:
src = -1
print('NOTICE: There were no detected working cameras for indexes 0 to 9!')
else:
src = camera_list[0]
msg = 'NOTICE: There are ' + str(len(camera_list) - 1) \
+ ' other operational camera source(s) available: ' + str(camera_list[1:])
print(msg.replace('are', 'is')) if len(camera_list) - 1 == 1 else print(msg)
self.avg = np.array([])
self.freq = cv2.getTickFrequency()
self.begin = 0
self.stream = cv2.VideoCapture(src)
self.config(dim=None, height=height, width=width, ratio=ratio)
(self.grabbed, self.frame) = self.stream.read()
self.released = not self.grabbed
def start(self):
if sys.version[0] == '3':
Thread(target=self.update, args=(), daemon=True).start()
else:
Thread(target=self.update, args=()).start()
return self
def update(self):
while True:
if self.released:
return
(self.grabbed, self.frame) = self.stream.read()
def read(self, width=None, height=None, ratio=None):
self.begin = cv2.getTickCount()
return (not self.released), self.resize(frame=self.frame, width=width, height=height, ratio=ratio)
def release(self):
self.stream.release()
self.released = True
def isOpened(self):
return not self.released
def fps(self):
self.avg = np.append(self.avg, (self.freq / (cv2.getTickCount() - self.begin)))
return self.avg[-1]
def avg_fps(self):
self.avg = np.append(self.avg, (self.freq / (cv2.getTickCount() - self.begin)))
return self.avg.mean()
def config(self, dim, height, width, ratio):
if ratio is None:
if height and width:
dim = (height, (height * float(width / height)))
elif not height and not width:
pass
else:
print('WARNING: Insufficient configuration parameters. The default was used.')
else:
if height:
dim = (height, (height * float(ratio)))
elif width:
dim = ((width / float(ratio)), width)
if dim:
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, self.round_up(dim[0]))
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, self.round_up(dim[1]))
def resize(self, frame, width, height, ratio):
dim = (dheight, dwidth) = frame.shape[:2]
if ratio is None:
if width and height:
dim = (height, width)
elif width and height is None:
dim = ((dheight * (width / dwidth)), width)
elif width is None and height:
dim = (height, (dwidth * (height / dheight)))
else:
if width is None and height is None:
dim = (dheight, (dheight * ratio))
elif width is None and height:
dim = (height, (height * ratio))
elif width and height is None:
dim = ((width / ratio), width)
else:
if (width / height) == ratio:
dim = (height, width)
else:
print('WARNING: Window resolution (' + str(width) + '*' + str(height)
+ ') does not agree with ratio ' + str(ratio) + '. The default was used.')
return cv2.resize(frame, (self.round_up(dim[1]), self.round_up(dim[0])), interpolation=cv2.INTER_AREA)
@staticmethod
def round_up(num):
return int(-(-num // 1))
if __name__ == '__main__':
q = queue.Queue()
size = 20
cap = VideoStream().start()
init_frame = cap.read()[1]
frame_height, frame_width = init_frame.shape[:2]
while cap.isOpened():
ret, frame = cap.read()
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
cap.release()
Thread(target=avg_color, args=(frame, size, frame_height, frame_width, q)).start()
Thread(target=t2s_say, args=(q.get(), q)).start()
cv2.destroyAllWindows()
'''
To add:
1- Fixes (see below)
To fix:
1- See #1.NB[1-3] in BETA.TestCode.OpenCV.VideoCap3.py
v1.6
''' | gpl-3.0 | -4,433,979,133,669,497,000 | 34.092199 | 110 | 0.538508 | false |
wangtaoking1/hummer | backend/kubernetes/k8sclient.py | 1 | 9953 | import json
import requests
import logging
from backend.kubernetes.namespace import Namespace
from backend.kubernetes.replicationcontroller import Controller
from backend.kubernetes.service import Service
from backend.kubernetes.volume import PersistentVolume, PersistentVolumeClaim
from backend.kubernetes.autoscaler import AutoScaler
logger = logging.getLogger('hummer')
class KubeClient(object):
"""
Kubernetes simple python client.
API: http://kubernetes.io/third_party/swagger-ui/
"""
_base_url = None
def __init__(self, base_url):
self._base_url = self.add_slash(base_url)
@classmethod
def add_slash(self, url):
"""
Ensure that the base url ends with '/'.
"""
if url.endswith('/'):
return url
return url + '/'
@property
def base_url(self):
return self._base_url
def _send_request(self, method, path, label=None, query=None, body=None):
"""
Send requests to k8s server and get the response.
Returns a response dict.
Parameters:
query: str, "app=name"
"""
url = self._base_url + path
if label:
url = '{}?labelSelector={}'.format(url, label)
if query:
url = url + '?' + query
kwargs = {}
if body:
kwargs['data'] = json.dumps(body)
try:
res = getattr(requests, method.lower())(url, **kwargs)
except Exception as error:
logger.error(error)
logger.error('%s request to %s failed' % (method, url))
return None
try:
response = json.loads(res.text)
return response
except Exception as error:
return res.text
def list_nodes(self):
"""
List all nodes.
"""
res = self._send_request('GET', 'nodes')
nodes = []
for item in res.get('items'):
nodes.append(item['metadata']['name'])
return nodes
def list_namespces(self):
"""
List all namespaces.
"""
res = self._send_request('GET', 'namespaces')
namespaces = []
for item in res.get('items'):
namespaces.append(item['metadata']['name'])
return namespaces
def create_namespace(self, name):
"""
Create namespace called name.
"""
namespace = Namespace(name)
response = self._send_request('POST', 'namespaces', body=namespace.body)
return self._is_creating_deleting_successful(response)
def delete_namespace(self, name):
"""
Delete namespace called name.
"""
response = self._send_request('DELETE', 'namespaces/{}'.format(name))
return self._is_creating_deleting_successful(response)
def create_persistentvolume(self, namespace, name, capacity, nfs_path,
nfs_server):
"""
Create persistentvolume called namespace-name.
"""
volume_name = namespace + '-' + name
volume = PersistentVolume(volume_name, capacity, nfs_path, nfs_server)
response = self._send_request('POST', 'persistentvolumes',
body=volume.body)
return self._is_creating_deleting_successful(response)
def delete_persistentvolume(self, namespace, name):
"""
Delete persistentvolume called namespace-name.
"""
volume_name = namespace + '-' + name
response = self._send_request('DELETE', 'persistentvolumes/{}'.format(
volume_name))
return self._is_creating_deleting_successful(response)
def create_persistentvolumeclaim(self, namespace, name, capacity):
"""
Create persistentvolumeclaim called name.
"""
volume_name = namespace + '-' + name
volumeclaim = PersistentVolumeClaim(volume_name, capacity)
response = self._send_request('POST',
'namespaces/{}/persistentvolumeclaims'.format(namespace),
body=volumeclaim.body)
return self._is_creating_deleting_successful(response)
def delete_persistentvolumeclaim(self, namespace, name):
"""
Delete persistentvolumeclaim called name.
"""
volume_name = namespace + '-' + name
response = self._send_request('DELETE',
'namespaces/{}/persistentvolumeclaims/{}'.format(namespace,
volume_name))
return self._is_creating_deleting_successful(response)
def list_controllers(self, namespace):
"""
List all replicationcontroller in the namespace.
"""
path = 'namespaces/{}/replicationcontrollers'.format(namespace)
res = self._send_request('GET', path)
controllers = []
for item in res.get('items'):
controllers.append(item['metadata']['name'])
return controllers
def create_controller(self, namespace, name, image_name, cpu, memory,
replicas=1, tcp_ports=None, udp_ports=None, commands=None, args=None,
envs=None, volumes=None):
"""
Create a replicationcontroller.
"""
controller = Controller(name, image_name, cpu, memory, replicas,
tcp_ports, udp_ports, commands, args, envs, volumes)
path = 'namespaces/{}/replicationcontrollers'.format(namespace)
# logger.debug(controller.body)
response = self._send_request('POST', path, body=controller.body)
return self._is_creating_deleting_successful(response)
def delete_controller(self, namespace, name):
"""
Delete a replicationcontroller.
"""
path = 'namespaces/{}/replicationcontrollers/{}'.format(namespace, name)
response = self._send_request('DELETE', path)
return self._is_creating_deleting_successful(response)
def list_pods(self, namespace, label=None):
"""
List pods by label.
Parameters:
label: str, "app=name"
"""
path = 'namespaces/{}/pods/'.format(namespace)
response = self._send_request('GET', path, label=label)
# logger.debug(response)
pods = []
for item in response.get('items'):
pods.append(item['metadata']['name'])
return pods
def list_host_ips(self, namespace, label=None):
"""
List all host ips for a controller.
Parameters:
label: str, "app=name"
"""
path = 'namespaces/{}/pods/'.format(namespace)
response = self._send_request('GET', path, label=label)
# logger.debug(response)
hosts = set()
for pod in response.get('items', []):
hosts.add(pod['spec']['nodeName'])
return list(hosts)
def delete_pod(self, namespace, name):
"""
Delete a pod.
"""
path = 'namespaces/{}/pods/{}'.format(namespace, name)
response = self._send_request('DELETE', path)
return self._is_creating_deleting_successful(response)
def list_services(self, namespace):
"""
List all services in the namespace.
"""
path = 'namespaces/{}/services'.format(namespace)
res = self._send_request('GET', path)
services = []
for item in res.get('items'):
services.append(item['metadata']['name'])
return services
def create_service(self, namespace, name, tcp_ports=None, udp_ports=None,
is_public=False, session_affinity=False):
"""
Create a service in namespace.
"""
service = Service(name, tcp_ports, udp_ports, is_public,
session_affinity)
path = 'namespaces/{}/services'.format(namespace)
# logger.debug(service.body)
response = self._send_request('POST', path, body=service.body)
return self._is_creating_deleting_successful(response)
def delete_service(self, namespace, name):
"""
Delete a service.
"""
path = 'namespaces/{}/services/{}'.format(namespace, name)
response = self._send_request('DELETE', path)
return self._is_creating_deleting_successful(response)
def get_service_details(self, namespace, name):
"""
Get the details of a service.
"""
path = 'namespaces/{}/services/{}'.format(namespace, name)
response = self._send_request('GET', path)
return response
def _is_creating_deleting_successful(self, response):
"""
Check the response to determine whether the create or delete operation
succeeded.
"""
status = response['status']
if isinstance(status, str) and status == 'Failure':
logger.debug(response['message'])
return False
return True
def get_logs_of_pod(self, namespace, pod_name, tail_line):
"""
Return the tail tail_line lines of logs of pod named pod_name.
"""
path = 'namespaces/{}/pods/{}/log'.format(namespace, pod_name)
query = "tailLines=" + str(tail_line)
response = self._send_request('GET', path, query=query)
return response.split('\n')
def create_autoscaler(self, namespace, name, minReplicas=-1, maxReplicas=-1,
cpu_target=-1):
"""
Create an autoscaler name in namespace namespace.
"""
scaler = AutoScaler(name, minReplicas, maxReplicas, cpu_target)
path = 'namespaces/{}/horizontalpodautoscalers'.format(namespace)
# logger.debug(scaler.body)
response = self._send_request('POST', path, body=scaler.body)
return self._is_creating_deleting_successful(response)
def delete_autoscaler(self, namespace, name):
"""
Delete the autoscaler name.
"""
path = 'namespaces/{}/horizontalpodautoscalers/{}'.format(namespace,
name)
response = self._send_request('DELETE', path)
return self._is_creating_deleting_successful(response)
| apache-2.0 | -1,566,903,951,912,053,800 | 32.969283 | 80 | 0.594394 | false |
martijnvermaat/rpclib | src/rpclib/util/etreeconv.py | 1 | 3585 |
#
# rpclib - Copyright (C) Rpclib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""This module contains the utility methods that convert an ElementTree
hierarchy to python dicts and vice versa.
"""
from lxml import etree
from rpclib.util.odict import odict
def root_dict_to_etree(d):
"""Converts a dictionary to an xml hiearchy. Just like a valid xml document,
the dictionary must have a single element. The format of the child
dictionaries is the same as :func:`dict_to_etree`.
"""
assert len(d) == 1
key, = d.keys()
retval = etree.Element(key)
for val in d.values():
break
if isinstance(val, dict) or isinstance(val, odict):
dict_to_etree(val, retval)
else:
for a in val:
dict_to_etree(a, retval)
return retval
def dict_to_etree(d, parent):
"""Takes a the dict whose value is either None or an instance of dict, odict
or an iterable. The iterables can contain either other dicts/odicts or
str/unicode instances.
"""
for k, v in d.items():
if v is None or len(v) == 0:
etree.SubElement(parent, k)
elif isinstance(v, dict) or isinstance(v, odict):
child = etree.SubElement(parent, k)
dict_to_etree(v, child)
else:
for e in v:
child=etree.SubElement(parent, k)
if isinstance(e, dict) or isinstance(e, odict):
dict_to_etree(e, child)
else:
child.text=str(e)
def root_etree_to_dict(element, iterable=(list, list.append)):
"""Takes an xml root element and returns the corresponding dict. The second
argument is a pair of iterable type and the function used to add elements to
the iterable. The xml attributes are ignored.
"""
return {element.tag: iterable[0]([etree_to_dict(element, iterable)])}
def etree_to_dict(element, iterable=(list, list.append)):
"""Takes an xml root element and returns the corresponding dict. The second
argument is a pair of iterable type and the function used to add elements to
the iterable. The xml attributes are ignored.
"""
if (element.text is None) or element.text.isspace():
retval = odict()
for elt in element:
if not (elt.tag in retval):
retval[elt.tag] = iterable[0]()
iterable[1](retval[elt.tag], etree_to_dict(elt, iterable))
else:
retval = element.text
return retval
def etree_strip_namespaces(element):
"""Removes any namespace information form the given element recursively."""
retval = etree.Element(element.tag.rpartition('}')[-1])
retval.text = element.text
for a in element.attrib:
retval.attrib[a.rpartition('}')[-1]] = element.attrib[a]
for e in element:
retval.append(etree_strip_namespaces(e))
return retval
| lgpl-2.1 | 138,900,269,373,878,800 | 32.194444 | 80 | 0.660251 | false |
prymitive/upaas-admin | upaas_admin/apps/applications/models.py | 1 | 31729 | # -*- coding: utf-8 -*-
"""
:copyright: Copyright 2013-2014 by Łukasz Mierzwa
:contact: [email protected]
"""
from __future__ import unicode_literals
import os
import datetime
import logging
import tempfile
import shutil
import time
import re
from copy import deepcopy
from mongoengine import (Document, DateTimeField, StringField, LongField,
ReferenceField, ListField, DictField, QuerySetManager,
BooleanField, IntField, NULLIFY, signals)
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.conf import settings
from upaas import utils
from upaas import tar
from upaas.checksum import calculate_file_sha256, calculate_string_sha256
from upaas.config.base import UPAAS_CONFIG_DIRS
from upaas.config.metadata import MetadataConfig
from upaas.storage.exceptions import StorageError
from upaas import processes
from upaas.utils import load_handler
from upaas_admin.apps.servers.models import RouterServer, BackendServer
from upaas_admin.apps.scheduler.models import ApplicationRunPlan
from upaas_admin.apps.applications.exceptions import UnpackError
from upaas_admin.apps.scheduler.base import Scheduler
from upaas_admin.apps.tasks.constants import TaskStatus
from upaas_admin.apps.tasks.models import Task
from upaas_admin.apps.applications.constants import (
NeedsBuildingFlag, NeedsStoppingFlag, NeedsRestartFlag, IsStartingFlag,
NeedsUpgradeFlag, FLAGS_BY_NAME)
from upaas_admin.apps.applications.helpers import (
ApplicationStateHelper, ApplicationFeatureHelper)
log = logging.getLogger(__name__)
class Package(Document):
date_created = DateTimeField(required=True, default=datetime.datetime.now)
metadata = StringField(help_text=_('Application metadata'))
application = ReferenceField('Application', dbref=False, required=True)
task = ReferenceField(Task, dbref=False)
interpreter_name = StringField(required=True)
interpreter_version = StringField(required=True)
parent = StringField()
parent_package = ReferenceField('Package')
filename = StringField()
bytes = LongField(required=True)
checksum = StringField(required=True)
builder = StringField(required=True)
distro_name = StringField(required=True)
distro_version = StringField(required=True)
distro_arch = StringField(required=True)
revision_id = StringField()
revision_author = StringField()
revision_date = DateTimeField()
revision_description = StringField()
revision_changelog = StringField()
ack_filename = '.upaas-unpacked'
meta = {
'indexes': ['filename'],
'ordering': ['-date_created'],
}
_default_manager = QuerySetManager()
@classmethod
def pre_delete(cls, sender, document, **kwargs):
log.debug(_("Pre delete signal on package {id}").format(
id=document.safe_id))
Application.objects(id=document.application.id).update_one(
pull__packages=document.id)
document.delete_package_file(null_filename=False)
@property
def safe_id(self):
return str(self.id)
@property
def metadata_config(self):
if self.metadata:
return MetadataConfig.from_string(self.metadata)
return {}
@property
def upaas_config(self):
return settings.UPAAS_CONFIG
@property
def package_path(self):
"""
Unpacked package directory path
"""
return os.path.join(settings.UPAAS_CONFIG.paths.apps, self.safe_id)
@property
def ack_path(self):
return os.path.join(self.package_path, self.ack_filename)
def delete_package_file(self, null_filename=True):
log.debug(_("Deleting package file for {pkg}").format(
pkg=self.safe_id))
if not self.filename:
log.debug(_("Package {pkg} has no filename, skipping "
"delete").format(pkg=self.safe_id))
return
storage = load_handler(self.upaas_config.storage.handler,
self.upaas_config.storage.settings)
if not storage:
log.error(_("Storage handler '{handler}' not found, cannot "
"package file").format(
handler=self.upaas_config.storage.handler))
return
log.debug(_("Checking if package file {path} is stored").format(
path=self.filename))
if storage.exists(self.filename):
log.info(_("Removing package {pkg} file from storage").format(
pkg=self.safe_id))
storage.delete(self.filename)
if null_filename:
log.info(_("Clearing filename for package {pkg}").format(
pkg=self.safe_id))
del self.filename
self.save()
def uwsgi_options_from_metadata(self):
"""
Parse uWSGI options in metadata (if any) and return only the allowed ones.
"""
options = []
compiled = []
for regexp in self.upaas_config.apps.uwsgi.safe_options:
compiled.append(re.compile(regexp))
for opt in self.metadata_config.uwsgi.settings:
if '=' in opt:
for regexp in compiled:
opt_name = opt.split('=')[0].rstrip(' ')
if regexp.match(opt_name):
options.append(opt)
log.debug(_("Adding safe uWSGI option from metadata: "
"{opt}").format(opt=opt))
break
return options
def generate_uwsgi_config(self, backend_conf):
"""
:param backend_conf: BackendRunPlanSettings instance for which we
generate config
"""
def _load_template(path):
log.debug("Loading uWSGI template from: %s" % path)
for search_path in UPAAS_CONFIG_DIRS:
template_path = os.path.join(search_path, path)
if os.path.exists(template_path):
f = open(template_path)
ret = f.read().splitlines()
f.close()
return ret
return []
# so it won't change while generating configuration
config = deepcopy(self.upaas_config)
base_template = config.interpreters['uwsgi']['template']
template = None
try:
template_any = config.interpreters[self.interpreter_name]['any'][
'uwsgi']['template']
except (AttributeError, KeyError):
pass
else:
if template_any:
template = template_any
try:
template_version = config.interpreters[self.interpreter_name][
self.interpreter_version]['uwsgi']['template']
except (AttributeError, KeyError):
pass
else:
if template_version:
template = template_version
max_memory = backend_conf.workers_max
max_memory *= self.application.run_plan.memory_per_worker
max_memory *= 1024 * 1024
variables = {
'namespace': self.package_path,
'chdir': config.apps.home,
'socket': '%s:%d' % (backend_conf.backend.ip, backend_conf.socket),
'stats': '%s:%d' % (backend_conf.backend.ip, backend_conf.stats),
'uid': config.apps.uid,
'gid': config.apps.gid,
'app_name': self.application.name,
'app_id': self.application.safe_id,
'pkg_id': self.safe_id,
'max_workers': backend_conf.workers_max,
'max_memory': max_memory,
'memory_per_worker': self.application.run_plan.memory_per_worker,
'max_log_size':
self.application.run_plan.max_log_size * 1024 * 1024,
}
if config.apps.graphite.carbon:
variables['carbon_servers'] = ' '.join(
config.apps.graphite.carbon)
variables['carbon_timeout'] = config.apps.graphite.timeout
variables['carbon_frequency'] = config.apps.graphite.frequency
variables['carbon_max_retry'] = config.apps.graphite.max_retry
variables['carbon_retry_delay'] = config.apps.graphite.retry_delay
variables['carbon_root'] = config.apps.graphite.root
try:
variables.update(config.interpreters[self.interpreter_name]['any'][
'uwsgi']['vars'])
except (AttributeError, KeyError):
pass
try:
variables.update(config.interpreters[self.interpreter_name][
self.interpreter_version]['uwsgi']['vars'])
except (AttributeError, KeyError):
pass
# interpreter default settings for any version
try:
for key, value in list(config.interpreters[self.interpreter_name][
'any']['settings'].items()):
var_name = "meta_%s_%s" % (self.interpreter_name, key)
variables[var_name] = value
except (AttributeError, KeyError):
pass
# interpreter default settings for current version
try:
for key, value in list(config.interpreters[self.interpreter_name][
self.interpreter_version]['settings'].items()):
var_name = "meta_%s_%s" % (self.interpreter_name, key)
variables[var_name] = value
except (AttributeError, KeyError):
pass
# interpreter settings from metadata
try:
for key, val in list(
self.metadata_config.interpreter.settings.items()):
var_name = "meta_%s_%s" % (self.interpreter_name, key)
variables[var_name] = val
except KeyError:
pass
envs = {}
try:
envs.update(config.interpreters[self.interpreter_name]['any'][
'env'])
except (AttributeError, KeyError):
pass
try:
envs.update(config.interpreters[self.interpreter_name][
self.interpreter_version]['env'])
except (AttributeError, KeyError):
pass
envs.update(self.metadata_config.env)
plugin = None
try:
plugin = config.interpreters[self.interpreter_name]['any'][
'uwsgi']['plugin']
except (AttributeError, KeyError):
pass
try:
plugin = config.interpreters[self.interpreter_name][
self.interpreter_version]['uwsgi']['plugin']
except (AttributeError, KeyError):
pass
options = ['[uwsgi]']
options.append('\n# starting uWSGI config variables list')
for key, value in list(variables.items()):
options.append('var_%s = %s' % (key, value))
for feature in self.application.feature_helper.load_enabled_features():
envs = feature.update_env(self.application, envs)
options.append('\n# starting ENV variables list')
for key, value in list(envs.items()):
options.append('env = %s=%s' % (key, value))
options.append(
'env = UPAAS_SYSTEM_DOMAIN=%s' % self.application.system_domain)
if self.application.custom_domains:
options.append('env = UPAAS_CUSTOM_DOMAINS=%s' % ','.join(
[d.name for d in self.application.custom_domains]))
options.append('\n# starting options from app metadata')
for opt in self.uwsgi_options_from_metadata():
options.append(opt)
# enable cheaper mode if we have multiple workers
if backend_conf.workers_max > backend_conf.workers_min:
options.append('\n# enabling cheaper mode')
options.append('cheaper = %d' % backend_conf.workers_min)
options.append('\n# starting base template')
options.extend(_load_template(base_template))
if config.apps.graphite.carbon:
options.append('\n# starting carbon servers block')
for carbon in config.apps.graphite.carbon:
options.append('carbon = %s' % carbon)
options.append('\n# starting interpreter plugin')
if plugin:
options.append('plugin = %s' % plugin)
options.append('\n# starting interpreter template')
options.extend(_load_template(template))
options.append('\n# starting subscriptions block')
for router in RouterServer.objects(is_enabled=True):
options.append('subscribe2 = server=%s:%d,key=%s' % (
router.subscription_ip, router.subscription_port,
self.application.system_domain))
for domain in self.application.custom_domains:
options.append('subscribe2 = server=%s:%d,key=%s' % (
router.subscription_ip, router.subscription_port,
domain.name))
options.append('\n')
for feature in self.application.feature_helper.load_enabled_features():
options = feature.update_vassal(self.application, options)
options.append('\n')
return options
def check_vassal_config(self, options):
"""
Verify whether the uWSGI vassal configuration file exists and whether it
needs updating.
"""
if os.path.exists(self.application.vassal_path):
current_hash = calculate_file_sha256(self.application.vassal_path)
new_hash = calculate_string_sha256(options)
if current_hash == new_hash:
return True
return False
def save_vassal_config(self, backend):
log.info(_("Generating uWSGI vassal configuration"))
options = "\n".join(self.generate_uwsgi_config(backend))
if self.check_vassal_config(options):
log.info("Vassal is present and valid, skipping rewrite")
return
log.info(_("Saving vassal configuration to {path}").format(
path=self.application.vassal_path))
with open(self.application.vassal_path, 'w') as vassal:
vassal.write(options)
log.info(_("Vassal saved"))
def unpack(self):
# directory is encoded into string to prevent unicode errors
directory = tempfile.mkdtemp(dir=self.upaas_config.paths.workdir,
prefix="upaas_package_").encode("utf-8")
storage = load_handler(self.upaas_config.storage.handler,
self.upaas_config.storage.settings)
if not storage:
log.error("Storage handler '%s' not "
"found" % self.upaas_config.storage.handler)
workdir = os.path.join(directory, "system")
pkg_path = os.path.join(directory, self.filename)
if os.path.exists(self.package_path):
log.error(_("Package directory already exists: {path}").format(
path=self.package_path))
raise UnpackError(_("Package directory already exists"))
log.info("Fetching package '%s'" % self.filename)
try:
storage.get(self.filename, pkg_path)
except StorageError:
log.error(_("Storage error while fetching package {name}").format(
name=self.filename))
utils.rmdirs(directory)
raise UnpackError(_("Storage error while fetching package "
"{name}").format(name=self.filename))
log.info("Unpacking package")
os.mkdir(workdir, 0o755)
if not tar.unpack_tar(pkg_path, workdir):
log.error(_("Error while unpacking package to '{workdir}'").format(
workdir=workdir))
utils.rmdirs(directory)
raise UnpackError(_("Error during package unpack"))
with open(os.path.join(workdir, self.ack_filename), 'w') as ack:
ack.write(_('Unpacked: {now}').format(now=datetime.datetime.now()))
for feature in self.application.feature_helper.load_enabled_features():
feature.after_unpack(self.application, workdir)
log.info(_("Package unpacked, moving into '{path}'").format(
path=self.package_path))
try:
shutil.move(workdir, self.package_path)
except shutil.Error as e:
log.error(_("Error while moving unpacked package to final "
"destination: e").format(e=e))
utils.rmdirs(directory, self.package_path)
raise UnpackError(_("Can't move to final directory: "
"{path}").format(path=self.package_path))
log.info(_("Package moved"))
utils.rmdirs(directory)
class ApplicationDomain(Document):
date_created = DateTimeField(required=True, default=datetime.datetime.now)
application = ReferenceField('Application', dbref=False, required=True)
name = StringField(required=True, unique=True, min_length=4, max_length=64)
validated = BooleanField()
meta = {
'indexes': ['application']
}
@property
def safe_id(self):
return str(self.id)
class FlagLock(Document):
date_created = DateTimeField(required=True, default=datetime.datetime.now)
application = ReferenceField('Application', dbref=False, required=True)
flag = StringField(required=True)
backend = ReferenceField(BackendServer, reverse_delete_rule=NULLIFY)
pid = IntField(required=True)
meta = {
'indexes': [
{'fields': ['application', 'flag', 'backend'], 'unique': True},
],
'ordering': ['-date_created'],
}
class ApplicationFlag(Document):
date_created = DateTimeField(required=True, default=datetime.datetime.now)
application = ReferenceField('Application', dbref=False, required=True)
name = StringField(required=True, unique_with='application')
options = DictField()
pending = BooleanField(default=True)
pending_backends = ListField(ReferenceField(BackendServer))
meta = {
'indexes': [
{'fields': ['name', 'application'], 'unique': True},
{'fields': ['name']},
{'fields': ['pending']},
],
'ordering': ['-date_created'],
}
@property
def title(self):
return FLAGS_BY_NAME.get(self.name).title
class Application(Document):
date_created = DateTimeField(required=True, default=datetime.datetime.now)
name = StringField(required=True, min_length=2, max_length=60,
unique_with='owner', verbose_name=_('name'))
# FIXME reverse_delete_rule=DENY for owner
owner = ReferenceField('User', dbref=False, required=True)
metadata = StringField(verbose_name=_('Application metadata'),
required=True)
current_package = ReferenceField(Package, dbref=False, required=False)
packages = ListField(ReferenceField(Package, dbref=False,
reverse_delete_rule=NULLIFY))
run_plan = ReferenceField('ApplicationRunPlan', dbref=False)
_default_manager = QuerySetManager()
meta = {
'indexes': [
{'fields': ['name', 'owner'], 'unique': True},
{'fields': ['packages']},
],
'ordering': ['name'],
}
def __init__(self, *args, **kwargs):
super(Application, self).__init__(*args, **kwargs)
self.state_helper = ApplicationStateHelper(self)
self.feature_helper = ApplicationFeatureHelper(self)
@property
def safe_id(self):
return str(self.id)
@property
def metadata_config(self):
if self.metadata:
return MetadataConfig.from_string(self.metadata)
return {}
@property
def upaas_config(self):
return settings.UPAAS_CONFIG
@property
def vassal_path(self):
"""
Application vassal config file path.
"""
return os.path.join(self.upaas_config.paths.vassals,
'%s.ini' % self.safe_id)
@property
def interpreter_name(self):
"""
Will return interpreter from current package metadata.
If no package was built interpreter will be fetched from app metadata.
If app has no metadata it will return None.
"""
if self.current_package:
return self.current_package.interpreter_name
else:
try:
return self.metadata_config.interpreter.type
except KeyError:
return None
@property
def interpreter_version(self):
"""
Will return interpreter version from current package metadata.
If no package was built interpreter will be fetched from app metadata.
If app has no metadata it will return None.
"""
if self.current_package:
return self.current_package.interpreter_version
elif self.metadata:
return utils.select_best_version(self.upaas_config,
self.metadata_config)
@property
def supported_interpreter_versions(self):
"""
Return list of interpreter versions that this app can run.
"""
if self.metadata:
return sorted(list(utils.supported_versions(
self.upaas_config, self.metadata_config).keys()), reverse=True)
@property
def can_start(self):
"""
Returns True only if the application has a built package but is not started yet.
"""
return bool(self.current_package and self.run_plan is None)
@property
def tasks(self):
"""
List of all tasks for this application.
"""
return Task.objects(application=self)
@property
def running_tasks(self):
"""
List of all running tasks for this application.
"""
return self.tasks.filter(status=TaskStatus.running)
@property
def build_tasks(self):
"""
List of all build tasks for this application.
"""
return self.tasks.filter(flag=NeedsBuildingFlag.name)
@property
def running_build_tasks(self):
"""
Returns list of running build tasks for this application.
"""
return self.build_tasks.filter(status=TaskStatus.running)
@property
def flags(self):
"""
Return list of application flags.
"""
return ApplicationFlag.objects(application=self)
@property
def system_domain(self):
"""
Returns automatic system domain for this application.
"""
return '%s.%s' % (self.safe_id, self.upaas_config.apps.domains.system)
@property
def custom_domains(self):
"""
List of custom domains assigned for this application.
"""
return ApplicationDomain.objects(application=self)
@property
def domain_validation_code(self):
"""
String used for domain ownership validation.
"""
return "upaas-app-id=%s" % self.safe_id
def get_absolute_url(self):
return reverse('app_details', args=[self.safe_id])
def build_package(self, force_fresh=False, interpreter_version=None):
q = {
'set__options__{0:s}'.format(
NeedsBuildingFlag.Options.build_fresh_package): force_fresh,
'set__options__{0:s}'.format(
NeedsBuildingFlag.Options.build_interpreter_version):
interpreter_version,
'unset__pending': True,
'upsert': True
}
ApplicationFlag.objects(application=self,
name=NeedsBuildingFlag.name).update_one(**q)
def start_application(self):
if self.current_package:
if not self.run_plan:
log.error("Trying to start '%s' without run plan" % self.name)
return
scheduler = Scheduler()
backends = scheduler.find_backends(self.run_plan)
if not backends:
log.error(_("Can't start '{name}', no backend "
"available").format(name=self.name))
self.run_plan.delete()
return
self.run_plan.update(set__backends=backends)
ApplicationFlag.objects(
application=self, name=IsStartingFlag.name).update_one(
set__pending_backends=[b.backend for b in backends],
upsert=True)
# FIXME what if there are waiting stop tasks on other backends ?
self.flags.filter(name=NeedsStoppingFlag.name).delete()
def stop_application(self):
if self.current_package:
if not self.run_plan:
return
if self.run_plan and not self.run_plan.backends:
# no backends in run plan, just delete it
self.run_plan.delete()
return
ApplicationFlag.objects(
application=self, name=NeedsStoppingFlag.name).update_one(
set__pending_backends=[
b.backend for b in self.run_plan.backends], upsert=True)
self.flags.filter(
name__in=[IsStartingFlag.name, NeedsRestartFlag.name]).delete()
def restart_application(self):
if self.current_package:
if not self.run_plan:
return
ApplicationFlag.objects(
application=self, name=NeedsRestartFlag.name).update_one(
set__pending_backends=[
b.backend for b in self.run_plan.backends], upsert=True)
def upgrade_application(self):
if self.current_package:
if not self.run_plan:
return
ApplicationFlag.objects(
application=self, name=NeedsUpgradeFlag.name).update_one(
set__pending_backends=[
b.backend for b in self.run_plan.backends], upsert=True)
def update_application(self):
if self.run_plan:
current_backends = [bc.backend for bc in self.run_plan.backends]
scheduler = Scheduler()
new_backends = scheduler.find_backends(self.run_plan)
if not new_backends:
log.error(_("Can't update '{name}', no backend "
"available").format(name=self.name))
return
updated_backends = []
for backend_conf in new_backends:
if backend_conf.backend in current_backends:
# replace backend settings with updated version
self.run_plan.update(
pull__backends__backend=backend_conf.backend)
self.run_plan.update(push__backends=backend_conf)
updated_backends.append(backend_conf.backend)
else:
# add backend to run plan if not already there
ApplicationRunPlan.objects(
id=self.run_plan.id,
backends__backend__nin=[
backend_conf.backend]).update_one(
push__backends=backend_conf)
log.info(_("Starting {name} on backend {backend}").format(
name=self.name, backend=backend_conf.backend.name))
ApplicationFlag.objects(
pending_backends__ne=backend_conf.backend,
application=self,
name=IsStartingFlag.name).update_one(
add_to_set__pending_backends=backend_conf.backend,
upsert=True)
if updated_backends:
ApplicationFlag.objects(
application=self, name=NeedsRestartFlag.name).update_one(
set__pending_backends=updated_backends, upsert=True)
for backend in current_backends:
if backend not in [bc.backend for bc in new_backends]:
log.info(_("Stopping {name} on old backend "
"{backend}").format(name=self.name,
backend=backend.name))
ApplicationFlag.objects(
pending_backends__ne=backend,
application=self,
name=NeedsStoppingFlag.name).update_one(
add_to_set__pending_backends=backend, upsert=True)
def trim_package_files(self):
"""
Removes over-limit package files from the database. The number of packages
per app that are kept in the database for the rollback feature is set in
user limits as 'packages_per_app'.
"""
storage = load_handler(self.upaas_config.storage.handler,
self.upaas_config.storage.settings)
if not storage:
log.error("Storage handler '%s' not found, cannot trim "
"packages" % self.upaas_config.storage.handler)
return
removed = 0
for pkg in Package.objects(application=self, filename__exists=True)[
self.owner.limits['packages_per_app']:]:
if pkg.id == self.current_package.id:
continue
removed += 1
pkg.delete_package_file(null_filename=True)
if removed:
log.info("Removed %d package file(s) for app %s" % (removed,
self.name))
def remove_unpacked_packages(self, exclude=None, timeout=None):
"""
Remove all but current unpacked packages
"""
if timeout is None:
timeout = self.upaas_config.commands.timelimit
log.info(_("Cleaning packages for {name}").format(name=self.name))
for pkg in self.packages:
if exclude and pkg.id in exclude:
# skip current package!
continue
if os.path.isdir(pkg.package_path):
log.info(_("Removing package directory {path}").format(
path=pkg.package_path))
# if there are running pids inside the package dir we will need to wait;
# this should only happen during upgrade, when we need to wait for the
# app to reload into the new package dir
started_at = datetime.datetime.now()
timeout_at = datetime.datetime.now() + datetime.timedelta(
seconds=timeout)
pids = processes.directory_pids(pkg.package_path)
while pids:
if datetime.datetime.now() > timeout_at:
log.error(_("Timeout reached while waiting for pids "
"in {path} to die, killing any remaining "
"processes").format(
path=pkg.package_path))
break
log.info(_("Waiting for {pids} pid(s) in {path} to "
"terminate").format(pids=len(pids),
path=pkg.package_path))
time.sleep(2)
pids = processes.directory_pids(pkg.package_path)
try:
processes.kill_and_remove_dir(pkg.package_path)
except OSError as e:
log.error(_("Exception during package directory cleanup: "
"{e}").format(e=e))
signals.pre_delete.connect(Package.pre_delete, sender=Package)
| gpl-3.0 | 8,420,968,401,472,349,000 | 36.952153 | 79 | 0.579078 | false |
googleapis/googleapis-gen | google/cloud/billing/budgets/v1/billing-budgets-v1-py/google/cloud/billing/budgets/__init__.py | 1 | 2304 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.billing.budgets_v1.services.budget_service.client import BudgetServiceClient
from google.cloud.billing.budgets_v1.services.budget_service.async_client import BudgetServiceAsyncClient
from google.cloud.billing.budgets_v1.types.budget_model import Budget
from google.cloud.billing.budgets_v1.types.budget_model import BudgetAmount
from google.cloud.billing.budgets_v1.types.budget_model import CustomPeriod
from google.cloud.billing.budgets_v1.types.budget_model import Filter
from google.cloud.billing.budgets_v1.types.budget_model import LastPeriodAmount
from google.cloud.billing.budgets_v1.types.budget_model import NotificationsRule
from google.cloud.billing.budgets_v1.types.budget_model import ThresholdRule
from google.cloud.billing.budgets_v1.types.budget_model import CalendarPeriod
from google.cloud.billing.budgets_v1.types.budget_service import CreateBudgetRequest
from google.cloud.billing.budgets_v1.types.budget_service import DeleteBudgetRequest
from google.cloud.billing.budgets_v1.types.budget_service import GetBudgetRequest
from google.cloud.billing.budgets_v1.types.budget_service import ListBudgetsRequest
from google.cloud.billing.budgets_v1.types.budget_service import ListBudgetsResponse
from google.cloud.billing.budgets_v1.types.budget_service import UpdateBudgetRequest
__all__ = ('BudgetServiceClient',
'BudgetServiceAsyncClient',
'Budget',
'BudgetAmount',
'CustomPeriod',
'Filter',
'LastPeriodAmount',
'NotificationsRule',
'ThresholdRule',
'CalendarPeriod',
'CreateBudgetRequest',
'DeleteBudgetRequest',
'GetBudgetRequest',
'ListBudgetsRequest',
'ListBudgetsResponse',
'UpdateBudgetRequest',
)
| apache-2.0 | -5,013,858,049,998,541,000 | 44.176471 | 105 | 0.796875 | false |
Fokko/incubator-airflow | airflow/hooks/hive_hooks.py | 1 | 39213 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import os
import re
import socket
import subprocess
import time
from collections import OrderedDict
from tempfile import NamedTemporaryFile
import unicodecsv as csv
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.security import utils
from airflow.utils.file import TemporaryDirectory
from airflow.utils.helpers import as_flattened_list
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
def get_context_from_env_var():
"""
Extract context from env variable, e.g. dag_id, task_id and execution_date,
so that they can be used inside BashOperator and PythonOperator.
:return: The context of interest.
"""
return {format_map['default']: os.environ.get(format_map['env_var_format'], '')
for format_map in AIRFLOW_VAR_NAME_FORMAT_MAPPING.values()}
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
It also supports ``beeline``,
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: str
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: str
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: str
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None,
mapred_queue=None,
mapred_queue_priority=None,
mapred_job_name=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
self.mapred_queue = mapred_queue or conf.get('hive',
'default_hive_mapred_queue')
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _get_proxy_user(self):
"""
This function sets the proper proxy_user value in case the user overwrites the default.
"""
conn = self.conn
proxy_user_value = conn.extra_dejson.get('proxy_user', "")
if proxy_user_value == "login" and conn.login:
return "hive.server2.proxy.user={0}".format(conn.login)
if proxy_user_value == "owner" and self.run_as:
return "hive.server2.proxy.user={0}".format(self.run_as)
if proxy_user_value != "": # There is a custom proxy user
return "hive.server2.proxy.user={0}".format(proxy_user_value)
return proxy_user_value # The default proxy user (undefined)
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{host}:{port}/{schema}".format(
host=conn.host, port=conn.port, schema=conn.schema)
if conf.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/[email protected]")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = self._get_proxy_user()
jdbc_url += ";principal={template};{proxy_user}".format(
template=template, proxy_user=proxy_user)
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = '"{}"'.format(jdbc_url)
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
@staticmethod
def _prepare_hiveconf(d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d: dict of key/value pairs to pass as ``-hiveconf`` parameters
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()])
)
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(schema=schema, hql=hql)
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
hql = hql + '\n'
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
env_context = get_context_from_env_var()
# Only extend the hive_conf if it is defined.
if hive_conf:
env_context.update(hive_conf)
hive_conf_params = self._prepare_hiveconf(env_context)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue),
'-hiveconf',
'mapred.job.queue.name={}'
.format(self.mapred_queue),
'-hiveconf',
'tez.queue.name={}'
.format(self.mapred_queue)
])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
self.log.info("%s", " ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir,
close_fds=True)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
self.log.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
self.log.info("Testing HQL [%s (...)]", query_preview)
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
self.log.info(message)
error_loc = re.search(r'(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
lst = int(error_loc.group(1))
begin = max(lst - 2, 0)
end = min(lst + 3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
self.log.info("Context :\n %s", context)
else:
self.log.info("SUCCESS")
def load_df(
self,
df,
table,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param df: DataFrame to load into a Hive table
:type df: pandas.DataFrame
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param field_dict: mapping from column name to hive data type.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param delimiter: field delimiter in the file
:type delimiter: str
:param encoding: str encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'M': 'TIMESTAMP', # datetime
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
d = OrderedDict()
for col, dtype in df.dtypes.iteritems():
d[col] = DTYPE_KIND_HIVE_TYPE[dtype.kind]
return d
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, mode="w") as f:
if field_dict is None:
field_dict = _infer_field_types_from_df(df)
df.to_csv(path_or_buf=f,
sep=delimiter,
header=False,
index=False,
encoding=encoding,
date_format="%Y-%m-%d %H:%M:%S",
**pandas_kwargs)
f.flush()
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
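    # Hedged usage sketch for load_df: column types are inferred from the
    # DataFrame dtypes unless an OrderedDict field_dict is supplied; extra
    # keyword arguments are forwarded to load_file. Table and column names
    # here are made up for illustration.
    #
    #   import pandas as pd
    #   df = pd.DataFrame({'state': ['TX'], 'num': [42]})
    #   hook.load_df(df, table='airflow.static_example', recreate=True)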
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False,
tblproperties=None):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
        large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n".format(table=table)
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
['`{k}` {v}'.format(k=k.strip('`'), v=v) for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n".format(
table=table, fields=fields)
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n".format(pfields=pfields)
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n".format(delimiter=delimiter)
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(
["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
hql += "TBLPROPERTIES({tprops})\n".format(tprops=tprops)
hql += ";"
self.log.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' ".format(filepath=filepath)
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} ".format(table=table)
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals})".format(pvals=pvals)
# As a workaround for HIVE-10541, add a newline character
# at the end of hql (AIRFLOW-2412).
hql += ';\n'
self.log.info(hql)
self.run_cli(hql)
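    # Hedged usage sketch for load_file: with create=True and a field_dict the
    # table is created STORED AS textfile, then the local file is loaded with
    # LOAD DATA LOCAL INPATH. Paths and names below are examples only.
    #
    #   from collections import OrderedDict
    #   fields = OrderedDict([('state', 'STRING'), ('num', 'BIGINT')])
    #   hook.load_file('/tmp/data.csv', 'airflow.static_example',
    #                  field_dict=fields, partition={'ds': '2015-01-01'})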
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
# java short max val
MAX_PART_COUNT = 32767
def __init__(self, metastore_conn_id='metastore_default'):
self.conn_id = metastore_conn_id
self.metastore = self.get_metastore_client()
def __getstate__(self):
        # This is for pickling to work despite the thrift hive client not
        # being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
import hmsclient
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
ms = self._find_valid_server()
if ms is None:
            raise AirflowException("Failed to locate a valid server.")
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if conf.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
conn_socket = TSocket.TSocket(ms.host, ms.port)
if conf.get('core', 'security') == 'kerberos' \
and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", conn_socket)
else:
transport = TTransport.TBufferedTransport(conn_socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return hmsclient.HMSClient(iprot=protocol)
def _find_valid_server(self):
conns = self.get_connections(self.conn_id)
for conn in conns:
host_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.log.info("Trying to connect to %s:%s", conn.host, conn.port)
if host_socket.connect_ex((conn.host, conn.port)) == 0:
self.log.info("Connected to %s:%s", conn.host, conn.port)
host_socket.close()
return conn
else:
self.log.info("Could not connect to %s:%s", conn.host, conn.port)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
        :param table: Name of hive table @partition belongs to
        :type table: str
        :param partition: Expression that matches the partitions to check for
            (eg `a = 'b' AND c = 'd'`)
        :type partition: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
with self.metastore as client:
partitions = client.get_partitions_by_filter(
schema, table, partition, 1)
if partitions:
return True
else:
return False
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
        :param table: Name of hive table @partition belongs to
        :type table: str
        :param partition_name: Name of the partition to check for (eg `a=b/c=d`)
        :type partition_name: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
with self.metastore as client:
return client.check_for_named_partition(schema, table, partition_name)
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
with self.metastore as client:
return client.get_table(dbname=db, tbl_name=table_name)
def get_tables(self, db, pattern='*'):
"""
        Get metastore table objects for the tables in a database matching a pattern
"""
with self.metastore as client:
tables = client.get_tables(db_name=db, pattern=pattern)
return client.get_table_objects_by_name(db, tables)
def get_databases(self, pattern='*'):
"""
        Get metastore databases matching the given pattern
"""
with self.metastore as client:
return client.get_databases(pattern)
def get_partitions(
self, schema, table_name, filter=None):
"""
Returns a list of all partitions in a table. Works only
        for tables with fewer than 32767 partitions (java short max val).
        For subpartitioned tables, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = client.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=HiveMetastoreHook.MAX_PART_COUNT)
else:
parts = client.get_partitions(
db_name=schema, tbl_name=table_name,
max_parts=HiveMetastoreHook.MAX_PART_COUNT)
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
@staticmethod
def _get_max_partition_from_part_specs(part_specs, partition_key, filter_map):
"""
        Helper method to get the max partition value for partition_key
        from a list of partition specs. The key:value pairs in filter_map
        are used to filter out non-matching partitions.
:param part_specs: list of partition specs.
:type part_specs: list
:param partition_key: partition key name.
:type partition_key: str
:param filter_map: partition_key:partition_value map used for partition filtering,
e.g. {'key1': 'value1', 'key2': 'value2'}.
Only partitions matching all partition_key:partition_value
pairs will be considered as candidates of max partition.
:type filter_map: map
:return: Max partition or None if part_specs is empty.
"""
if not part_specs:
return None
# Assuming all specs have the same keys.
if partition_key not in part_specs[0].keys():
raise AirflowException("Provided partition_key {} "
"is not in part_specs.".format(partition_key))
if filter_map:
is_subset = set(filter_map.keys()).issubset(set(part_specs[0].keys()))
if filter_map and not is_subset:
raise AirflowException("Keys in provided filter_map {} "
"are not subset of part_spec keys: {}"
.format(', '.join(filter_map.keys()),
', '.join(part_specs[0].keys())))
candidates = [p_dict[partition_key] for p_dict in part_specs
if filter_map is None or
all(item in p_dict.items() for item in filter_map.items())]
if not candidates:
return None
else:
return max(candidates).encode('utf-8')
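    # Worked example (illustrative, not a doctest): given
    #   part_specs = [{'ds': '2015-01-01', 'hr': '00'},
    #                 {'ds': '2015-01-02', 'hr': '00'},
    #                 {'ds': '2015-01-03', 'hr': '01'}]
    # a call with partition_key='ds' and filter_map={'hr': '00'} keeps only the
    # first two specs and returns the max of '2015-01-01' and '2015-01-02',
    # i.e. b'2015-01-02' (the result is utf-8 encoded before being returned).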
def max_partition(self, schema, table_name, field=None, filter_map=None):
"""
Returns the maximum value for all partitions with given field in a table.
        If only one partition key exists in the table, that key will be used as the field.
filter_map should be a partition_key:partition_value map and will be used to
filter out partitions.
:param schema: schema name.
:type schema: str
:param table_name: table name.
:type table_name: str
:param field: partition key to get max partition from.
:type field: str
:param filter_map: partition_key:partition_value map used for partition filtering.
:type filter_map: map
>>> hh = HiveMetastoreHook()
        >>> filter_map = {'ds': '2015-01-01'}
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow',\
... table_name=t, field='ds', filter_map=filter_map)
'2015-01-01'
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
key_name_set = {key.name for key in table.partitionKeys}
if len(table.partitionKeys) == 1:
field = table.partitionKeys[0].name
elif not field:
raise AirflowException("Please specify the field you want the max "
"value for.")
elif field not in key_name_set:
raise AirflowException("Provided field is not a partition key.")
if filter_map and not set(filter_map.keys()).issubset(key_name_set):
raise AirflowException("Provided filter_map contains keys "
"that are not partition key.")
part_names = \
client.get_partition_names(schema,
table_name,
max_parts=HiveMetastoreHook.MAX_PART_COUNT)
part_specs = [client.partition_name_to_spec(part_name)
for part_name in part_names]
return HiveMetastoreHook._get_max_partition_from_part_specs(part_specs,
field,
filter_map)
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
self.get_table(table_name, db)
return True
except Exception:
return False
class HiveServer2Hook(BaseHook):
"""
Wrapper around the pyhive library
Notes:
    * the default authMechanism is NONE; to override it you
    can specify it in the ``extra`` of your connection in the UI
    * the default for run_set_variable_statements is true; if you
    are using impala you may need to set it to false in the
    ``extra`` of your connection in the UI
"""
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self, schema=None):
"""
Returns a Hive connection object.
"""
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'NONE')
        # make sure ``username`` is always bound so the ``or`` fallback below
        # never hits an undefined name when no login is set on the connection
        username = None
        if auth_mechanism == 'NONE' and db.login is None:
            # we need to give a username
            username = 'airflow'
kerberos_service_name = None
if conf.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'KERBEROS')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
            # pyhive expects 'KERBEROS' rather than the deprecated 'GSSAPI' as the auth_mechanism identifier
if auth_mechanism == 'GSSAPI':
self.log.warning(
"Detected deprecated 'GSSAPI' for authMechanism "
"for %s. Please use 'KERBEROS' instead",
self.hiveserver2_conn_id
)
auth_mechanism = 'KERBEROS'
from pyhive.hive import connect
return connect(
host=db.host,
port=db.port,
auth=auth_mechanism,
kerberos_service_name=kerberos_service_name,
username=db.login or username,
password=db.password,
database=schema or db.schema or 'default')
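    # Hedged note: the auth behaviour above is driven by the connection's
    # ``extra`` JSON. An assumed example for a kerberized cluster would be
    #   {"authMechanism": "KERBEROS", "kerberos_service_name": "hive"}
    # while leaving ``extra`` empty falls back to NONE (and the placeholder
    # user 'airflow' when no login is set on the connection).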
def _get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
from pyhive.exc import ProgrammingError
if isinstance(hql, str):
hql = [hql]
previous_description = None
with contextlib.closing(self.get_conn(schema)) as conn, \
contextlib.closing(conn.cursor()) as cur:
cur.arraysize = fetch_size or 1000
# not all query services (e.g. impala AIRFLOW-4434) support the set command
db = self.get_connection(self.hiveserver2_conn_id)
if db.extra_dejson.get('run_set_variable_statements', True):
env_context = get_context_from_env_var()
if hive_conf:
env_context.update(hive_conf)
for k, v in env_context.items():
cur.execute("set {}={}".format(k, v))
for statement in hql:
cur.execute(statement)
                    # we only get results of statements that return rows
lowered_statement = statement.lower().strip()
if (lowered_statement.startswith('select') or
lowered_statement.startswith('with') or
lowered_statement.startswith('show') or
(lowered_statement.startswith('set') and
'=' not in lowered_statement)):
description = [c for c in cur.description]
if previous_description and previous_description != description:
message = '''The statements are producing different descriptions:
Current: {}
Previous: {}'''.format(repr(description),
repr(previous_description))
raise ValueError(message)
elif not previous_description:
previous_description = description
yield description
try:
# DB API 2 raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
yield from cur
except ProgrammingError:
self.log.debug("get_results returned no records")
def get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
"""
Get results of the provided hql in target schema.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param fetch_size: max size of result to fetch.
:type fetch_size: int
:param hive_conf: hive_conf to execute alone with the hql.
:type hive_conf: dict
:return: results of hql execution, dict with data (list of results) and header
:rtype: dict
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
results = {
'data': list(results_iter),
'header': header
}
return results
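    # Hedged usage sketch: get_results returns a dict with 'header' (the cursor
    # description tuples) and 'data' (a list of row tuples). Schema and query
    # below are examples only.
    #
    #   res = hook.get_results("SELECT 1", schema='default')
    #   cols = [c[0] for c in res['header']]
    #   rows = res['data']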
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000,
hive_conf=None):
"""
Execute hql in target schema and write results to a csv file.
:param hql: hql to be executed.
:type hql: str or list
:param csv_filepath: filepath of csv to write results into.
:type csv_filepath: str
:param schema: target schema, default to 'default'.
:type schema: str
:param delimiter: delimiter of the csv file, default to ','.
:type delimiter: str
:param lineterminator: lineterminator of the csv file.
:type lineterminator: str
:param output_header: header of the csv file, default to True.
:type output_header: bool
        :param fetch_size: number of result rows fetched and written to the csv file per batch, default to 1000.
:type fetch_size: int
:param hive_conf: hive_conf to execute alone with the hql.
:type hive_conf: dict
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
message = None
i = 0
with open(csv_filepath, 'wb') as file:
writer = csv.writer(file,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
try:
if output_header:
self.log.debug('Cursor description is %s', header)
writer.writerow([c[0] for c in header])
for i, row in enumerate(results_iter, 1):
writer.writerow(row)
if i % fetch_size == 0:
self.log.info("Written %s rows so far.", i)
except ValueError as exception:
message = str(exception)
if message:
# need to clean up the file first
os.remove(csv_filepath)
raise ValueError(message)
self.log.info("Done. Loaded a total of %s rows.", i)
def get_records(self, hql, schema='default', hive_conf=None):
"""
Get a set of records from a Hive query.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param hive_conf: hive_conf to execute alone with the hql.
:type hive_conf: dict
:return: result of hive execution
:rtype: list
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema, hive_conf=hive_conf)['data']
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
        :return: result of hql execution as a pandas DataFrame
        :rtype: pandas.DataFrame
        >>> hh = HiveServer2Hook()
        >>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
        >>> df = hh.get_pandas_df(sql)
        >>> len(df.index)
        100
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
| apache-2.0 | 80,130,069,523,703,410 | 38.291583 | 93 | 0.543468 | false |
facom/GeoTrans | system.py | 1 | 1241 | from geotrans import *
System=dict2obj(dict(\
#########################################
#SYSTEM PRIMARY PARAMETERS
#########################################
#//////////////////////////////
#DETECTOR
#//////////////////////////////
Ddet=0.5,#Aperture, m
qeff=1.0,#Quantum efficiency
#//////////////////////////////
#STAR
#//////////////////////////////
Mstar=1.0*MSUN,
Rstar=1.0*RSUN,
Lstar=1.0*LSUN,
Tstar=1.0*TSUN,
Dstar=1*KILO*PARSEC,
c1=0.70,#Limb Darkening
c2=-0.24,#Limb Darkening
#//////////////////////////////
#ORBIT
#//////////////////////////////
ap=1.0*AU,
ep=0.0,
#iorb=89.95*DEG,#Paper
#iorb=90.00*DEG,
iorb=89.95*DEG,
#iorb=90.1*DEG,
wp=0.0*DEG,
#//////////////////////////////
#PLANET
#//////////////////////////////
Mplanet=1.0*MSAT,
Rplanet=1.0*RSAT,
fp=0.0, #Oblateness
#//////////////////////////////
#RINGS
#//////////////////////////////
fe=RSAT_ARING/RSAT, #Exterior ring (Rp)
#fe=5.0,
fi=RSAT_BRING/RSAT, #Interior ring (Rp)
ir=30.0*DEG, #Ring inclination
phir=60.0*DEG, #Ring roll angle
tau=1.0, #Opacity
))
#########################################
#SYSTEM DERIVATIVE PARAMETERS
#########################################
derivedSystemProperties(System)
updatePlanetRings(System)
updatePosition(System,System.tcen)
| gpl-2.0 | -1,497,280,718,787,404,000 | 21.981481 | 41 | 0.446414 | false |
elego/tkobr-addons | tko_project_task_status/models/project_task.py | 1 | 5572 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import models, api, fields
from datetime import datetime
from dateutil.relativedelta import relativedelta
from odoo.osv import expression
from odoo.tools.safe_eval import safe_eval
import time
from odoo.exceptions import Warning
class ProjectTaskActions(models.Model):
_name = 'project.task.action'
name = fields.Char(string='Name', required=True)
expected_duration = fields.Integer(u'Expected Time', default=1, required=True)
expected_duration_unit = fields.Selection([('d', 'Day'), ('w', 'Week'), ('m', 'Month'), ('y', 'Year')],
default='d', required=True, string=u'Expected Time Unit')
filter_id = fields.Many2one('ir.filters','Filter')
filter_warning_message = fields.Text("Warning Message")
done_server_action_id = fields.Many2one('ir.actions.server', string='Done Server Rule', help=u'This server action will be executed when Actions is set to done')
cancel_server_action_id = fields.Many2one('ir.actions.server', string='Cancel Server Rule', help=u'This server action will be executed when Actions is set to cancel')
class ProjectTaskActionsLine(models.Model):
_name = 'project.task.action.line'
action_id = fields.Many2one('project.task.action', u'Actions')
expected_date = fields.Date(u'Expected Date')
done_date = fields.Date(u'Done Date', readonly=True)
task_id = fields.Many2one('project.task', 'Task')
state = fields.Selection([('i', u'In Progress'), ('d', u'Done'), ('c', u'Cancelled')], default='i', required=True,
string='State')
@api.model
def _eval_context(self):
"""Returns a dictionary to use as evaluation context for
ir.rule domains."""
return {'user': self.env.user, 'time': time}
#Validate action filter
def validate_action_filter(self):
"""
Context must have active_id
:return:
"""
model_name = 'project.task'
eval_context = self._eval_context()
active_id = self.task_id.id
if active_id and model_name:
domain = self.action_id.filter_id.domain
rule = expression.normalize_domain(safe_eval(domain, eval_context))
Query = self.env[model_name].sudo()._where_calc(rule, active_test=False)
from_clause, where_clause, where_clause_params = Query.get_sql()
where_str = where_clause and (" WHERE %s" % where_clause) or ''
query_str = 'SELECT id FROM ' + from_clause + where_str
self._cr.execute(query_str, where_clause_params)
result = self._cr.fetchall()
if active_id in [id[0] for id in result]:
return True
return False
def set_done(self):
if self.action_id.filter_id:
# validate filter here
if not self.validate_action_filter():
raise Warning(self.action_id.filter_warning_message or "Warning message not set")
#set to done and execute server action
self.write({'state': 'd', 'done_date':fields.Date.today()})
if self.action_id.done_server_action_id:
new_context = dict(self.env.context)
if 'active_id' not in new_context.keys():
new_context.update({'active_id': self.task_id.id,'active_model':'project.task'})
recs = self.action_id.done_server_action_id.with_context(new_context)
recs.run()
def set_cancel(self):
self.state = 'c'
if self.action_id.cancel_server_action_id:
self.action_id.cancel_server_action_id.run()
@api.onchange('action_id')
def onchange_action(self):
if self.action_id:
days = weeks = months = years = 0
if self.action_id.expected_duration_unit == 'd':
days = self.action_id.expected_duration
if self.action_id.expected_duration_unit == 'w':
weeks = self.action_id.expected_duration
if self.action_id.expected_duration_unit == 'm':
months = self.action_id.expected_duration
if self.action_id.expected_duration_unit == 'y':
years = self.action_id.expected_duration
self.expected_date = datetime.today() + relativedelta(years=years, months=months, weeks=weeks, days=days)
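    # Worked example (illustrative): with expected_duration=2 and
    # expected_duration_unit='w', the relativedelta above adds two weeks, so an
    # action chosen on 2018-01-01 gets expected_date 2018-01-15.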
class ProjectTask(models.Model):
_inherit = 'project.task'
action_line_ids = fields.One2many('project.task.action.line', 'task_id', 'Actions')
| agpl-3.0 | 3,694,331,997,314,282,500 | 43.935484 | 170 | 0.618988 | false |
v-legoff/accertin | lyntin/ui/message.py | 1 | 1613 | #######################################################################
# This file is part of Lyntin.
# copyright (c) Free Software Foundation 2001, 2002
#
# Lyntin is distributed under the GNU General Public License license. See the
# file LICENSE for distribution details.
# $Id: message.py,v 1.1 2003/08/01 00:14:52 willhelm Exp $
#######################################################################
"""
Holds the ui's Message class. This gets passed around Lyntin and
allows us to scope data going to the ui.
"""
""" The message type constants."""
ERROR = "ERROR: "
USERDATA = "USERDATA: "
MUDDATA = "MUDDATA: "
LTDATA = "LTDATA: "
""" Used for debugging purposes."""
MESSAGETYPES = {ERROR: "ERROR: ",
USERDATA: "USERDATA: ",
MUDDATA: "MUDDATA: ",
LTDATA: "LTDATA: "}
class Message:
"""
Encapsulates a message to be written to the user.
"""
def __init__(self, data, messagetype=LTDATA, ses=None):
"""
Initialize.
@param data: the message string
@type data: string
@param messagetype: the message type (use a constant defined in ui.ui)
@type messagetype: int
@param ses: the session this message belongs to
@type ses: session.Session
"""
self.session = ses
self.data = data
self.type = messagetype
def __repr__(self):
"""
    Represents the message (returns session + type + data).
"""
return repr(self.session) + MESSAGETYPES[self.type] + repr(self.data)
def __str__(self):
"""
The string representation of the Message is the data
itself.
"""
return self.data
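# Hedged usage sketch (not part of the original module): a ui would typically
# construct and stringify messages like
#
#   msg = Message("you see a troll\n", MUDDATA, ses=None)
#   text = str(msg)       # -> "you see a troll\n"
#   debug = repr(msg)     # -> session repr + "MUDDATA: " + data repr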
| gpl-3.0 | -5,694,003,833,159,969,000 | 25.883333 | 78 | 0.578425 | false |
loads/molotov | molotov/tests/test_sharedconsole.py | 1 | 2723 | import unittest
import asyncio
import sys
import os
import re
import io
from molotov.util import multiprocessing
from molotov.sharedconsole import SharedConsole
from molotov.tests.support import dedicatedloop, catch_output
OUTPUT = """\
one
two
3
TypeError\\("unsupported operand type(.*)?
TypeError\\("unsupported operand type.*"""
# pre-forked variable
_CONSOLE = SharedConsole(interval=0.0)
_PROC = []
def run_worker(input):
if os.getpid() not in _PROC:
_PROC.append(os.getpid())
_CONSOLE.print("hello")
try:
3 + ""
except Exception:
_CONSOLE.print_error("meh")
with catch_output() as (stdout, stderr):
loop = asyncio.new_event_loop()
fut = asyncio.ensure_future(_CONSOLE.display(), loop=loop)
loop.run_until_complete(fut)
loop.close()
stdout = stdout.read()
assert stdout == "", stdout
class TestSharedConsole(unittest.TestCase):
@dedicatedloop
def test_simple_usage(self):
test_loop = asyncio.get_event_loop()
stream = io.StringIO()
console = SharedConsole(interval=0.0, stream=stream)
async def add_lines():
console.print("one")
console.print("two")
console.print("3")
try:
1 + "e"
except Exception as e:
console.print_error(e)
console.print_error(e, sys.exc_info()[2])
await asyncio.sleep(0.2)
await console.stop()
with catch_output() as (stdout, stderr):
adder = asyncio.ensure_future(add_lines())
displayer = asyncio.ensure_future(console.display())
test_loop.run_until_complete(asyncio.gather(adder, displayer))
stream.seek(0)
output = stream.read()
test_loop.close()
self.assertTrue(re.match(OUTPUT, output, re.S | re.M) is not None, output)
@unittest.skipIf(os.name == "nt", "win32")
@dedicatedloop
def test_multiprocess(self):
test_loop = asyncio.get_event_loop()
# now let's try with several processes
pool = multiprocessing.Pool(3)
try:
inputs = [1] * 3
pool.map(run_worker, inputs)
finally:
pool.close()
async def stop():
await asyncio.sleep(1)
await _CONSOLE.stop()
with catch_output() as (stdout, stderr):
stop = asyncio.ensure_future(stop())
display = asyncio.ensure_future(_CONSOLE.display())
test_loop.run_until_complete(asyncio.gather(stop, display))
output = stdout.read()
for pid in _PROC:
self.assertTrue("[%d]" % pid in output)
test_loop.close()
| apache-2.0 | 5,061,824,458,593,446,000 | 26.505051 | 82 | 0.59126 | false |
rbooth200/DiscEvolution | DiscEvolution/driver.py | 1 | 12630 | # driver.py
#
# Author: R. Booth
# Date: 17 - Nov - 2016
#
# Combined model for dust, gas and chemical evolution
################################################################################
from __future__ import print_function
import numpy as np
import os
from .photoevaporation import FixedExternalEvaporation
from .constants import yr
from . import io
class DiscEvolutionDriver(object):
"""Driver class for full evolution model.
Required Arguments:
disc : Disc model to update
Optional Physics update:
dust : Update the dust, i.e. radial drift
gas : Update due to gas effects, i.e. Viscous evolution
        diffusion : Separate diffusion update
internal_photo : Remove gas by internal photoevaporation
photoevaporation : Remove gas by external photoevaporation
chemistry : Solver for the chemical evolution
History:
history : Tracks values of key parameters over time
Note: Diffusion is usually handled in the dust dynamics module
Other options:
t0 : Starting time, default = 0, code units
        t_out : Previous output times, default = None, years
"""
def __init__(self, disc, gas=None, dust=None, diffusion=None, chemistry=None, ext_photoevaporation=None, int_photoevaporation=None, history=None, t0=0.):
self._disc = disc
self._gas = gas
self._dust = dust
self._diffusion = diffusion
self._chemistry = chemistry
self._external_photo = ext_photoevaporation
self._internal_photo = int_photoevaporation
self._history = history
self._t = t0
self._nstep = 0
def __call__(self, tmax):
"""Evolve the disc for a single timestep
args:
            tmax : Upper limit to time-step
returns:
dt : Time step taken
"""
disc = self._disc
# Compute the maximum time-step
dt = tmax - self.t
if self._gas:
dt = min(dt, self._gas.max_timestep(self._disc))
if self._dust:
v_visc = self._gas.viscous_velocity(disc)
dt = min(dt, self._dust.max_timestep(self._disc, v_visc))
if self._dust._diffuse:
dt = min(dt, self._dust._diffuse.max_timestep(self._disc))
if self._diffusion:
dt = min(dt, self._diffusion.max_timestep(self._disc))
if self._external_photo and hasattr(self._external_photo,"_density"): # If we are using density to calculate mass loss rates, we need to limit the time step based on photoevaporation
(dM_dot, dM_gas) = self._external_photo.optically_thin_weighting(disc)
Dt = dM_gas[(dM_dot>0)] / dM_dot[(dM_dot>0)]
Dt_min = np.min(Dt)
dt = min(dt,Dt_min)
# Determine tracers for dust step
gas_chem, ice_chem = None, None
dust = None
try:
gas_chem = disc.chem.gas.data
ice_chem = disc.chem.ice.data
except AttributeError:
pass
# Do dust evolution
if self._dust:
self._dust(dt, disc,
gas_tracers=gas_chem,
dust_tracers=ice_chem, v_visc=v_visc)
# Determine tracers for gas steps
try:
gas_chem = disc.chem.gas.data
ice_chem = disc.chem.ice.data
except AttributeError:
pass
try:
dust = disc.dust_frac
except AttributeError:
pass
# Do Advection-diffusion update
if self._gas:
self._gas(dt, disc, [dust, gas_chem, ice_chem])
if self._diffusion:
if gas_chem is not None:
gas_chem[:] += dt * self._diffusion(disc, gas_chem)
if ice_chem is not None:
ice_chem[:] += dt * self._diffusion(disc, ice_chem)
if dust is not None:
dust[:] += dt * self._diffusion(disc, dust)
# Do external photoevaporation
if self._external_photo:
self._external_photo(disc, dt)
# Do internal photoevaporation
if self._internal_photo:
self._internal_photo(disc, dt/yr, self._external_photo)
# Pin the values to >= 0 and <=1:
disc.Sigma[:] = np.maximum(disc.Sigma, 0)
try:
disc.dust_frac[:] = np.maximum(disc.dust_frac, 0)
disc.dust_frac[:] /= np.maximum(disc.dust_frac.sum(0), 1.0)
except AttributeError:
pass
try:
disc.chem.gas.data[:] = np.maximum(disc.chem.gas.data, 0)
disc.chem.ice.data[:] = np.maximum(disc.chem.ice.data, 0)
except AttributeError:
pass
# Chemistry
if self._chemistry:
rho = disc.midplane_gas_density
eps = disc.dust_frac.sum(0)
grain_size = disc.grain_size[-1]
T = disc.T
self._chemistry.update(dt, T, rho, eps, disc.chem,
grain_size=grain_size)
# If we have dust, we should update it now the ice fraction has
# changed
disc.update_ices(disc.chem.ice)
        # Now we should update the auxiliary properties, do grain growth etc
disc.update(dt)
self._t += dt
self._nstep += 1
return dt
@property
def disc(self):
return self._disc
@property
def t(self):
return self._t
@property
def num_steps(self):
return self._nstep
@property
def gas(self):
return self._gas
@property
def dust(self):
return self._dust
@property
def diffusion(self):
return self._diffusion
@property
def chemistry(self):
return self._chemistry
@property
def photoevaporation_external(self):
return self._external_photo
@property
def photoevaporation_internal(self):
return self._internal_photo
@property
def history(self):
return self._history
def dump_ASCII(self, filename):
"""Write the current state to a file, including header information"""
# Put together a header containing information about the physics
# included
head = ''
if self._gas:
head += self._gas.ASCII_header() + '\n'
if self._dust:
head += self._dust.ASCII_header() + '\n'
if self._diffusion:
head += self._diffusion.ASCII_header() + '\n'
if self._chemistry:
head += self._chemistry.ASCII_header() + '\n'
if self._external_photo:
head += self._external_photo.ASCII_header() + '\n'
if self._internal_photo:
head += self._internal_photo.ASCII_header() + '\n'
# Write it all to disc
io.dump_ASCII(filename, self._disc, self.t, head)
def dump_hdf5(self, filename):
"""Write the current state in HDF5 format, with header information"""
headers = []
if self._gas: headers.append(self._gas.HDF5_attributes())
if self._dust: headers.append(self._dust.HDF5_attributes())
if self._diffusion: headers.append(self._diffusion.HDF5_attributes())
if self._chemistry: headers.append(self._chemistry.HDF5_attributes())
if self._external_photo: headers.append(self._external_photo.HDF5_attributes())
if self._internal_photo: headers.append(self._internal_photo.HDF5_attributes())
io.dump_hdf5(filename, self._disc, self.t, headers)
if __name__ == "__main__":
from .star import SimpleStar
from .grid import Grid
from .eos import IrradiatedEOS
from .viscous_evolution import ViscousEvolution
from .dust import DustGrowthTwoPop, SingleFluidDrift
from .opacity import Zhu2012, Tazzari2016
from .diffusion import TracerDiffusion
from .chemistry import TimeDepCOChemOberg, SimpleCOAtomAbund
from .constants import Msun, AU
from .disc_utils import mkdir_p
import matplotlib.pyplot as plt
alpha = 1e-3
Mdot = 1e-8
Rd = 100.
#kappa = Zhu2012
kappa = Tazzari2016()
N_cell = 250
R_in = 0.1
R_out = 500.
yr = 2*np.pi
output_dir = 'test_DiscEvo'
output_times = np.arange(0, 4) * 1e6 * yr
plot_times = np.array([0, 1e4, 1e5, 5e5, 1e6, 3e6])*yr
# Setup the initial conditions
Mdot *= (Msun / yr) / AU**2
grid = Grid(R_in, R_out, N_cell, spacing='natural')
star = SimpleStar(M=1, R=2.5, T_eff=4000.)
# Initial guess for Sigma:
R = grid.Rc
Sigma = (Mdot / (0.1 * alpha * R**2 * star.Omega_k(R))) * np.exp(-R/Rd)
# Iterate until constant Mdot
eos = IrradiatedEOS(star, alpha, kappa=kappa)
eos.set_grid(grid)
eos.update(0, Sigma)
for i in range(100):
Sigma = 0.5 * (Sigma + (Mdot / (3 * np.pi * eos.nu)) * np.exp(-R/Rd))
eos.update(0, Sigma)
# Create the disc object
disc = DustGrowthTwoPop(grid, star, eos, 0.01, Sigma=Sigma)
# Setup the chemistry
chemistry = TimeDepCOChemOberg(a=1e-5)
# Setup the dust-to-gas ratio from the chemistry
solar_abund = SimpleCOAtomAbund(N_cell)
solar_abund.set_solar_abundances()
# Iterate ice fractions to get the dust-to-gas ratio:
for i in range(10):
chem = chemistry.equilibrium_chem(disc.T,
disc.midplane_gas_density,
disc.dust_frac.sum(0),
solar_abund)
disc.initialize_dust_density(chem.ice.total_abund)
disc.chem = chem
# Setup the dynamics modules:
gas = ViscousEvolution()
dust = SingleFluidDrift(TracerDiffusion())
evo = DiscEvolutionDriver(disc, gas=gas, dust=dust, chemistry=chemistry)
# Setup the IO controller
IO = io.Event_Controller(save=output_times, plot=plot_times)
# Run the model!
while not IO.finished():
ti = IO.next_event_time()
while evo.t < ti:
dt = evo(ti)
if (evo.num_steps % 1000) == 0:
print('Nstep: {}'.format(evo.num_steps))
print('Time: {} yr'.format(evo.t / yr))
print('dt: {} yr'.format(dt / yr))
if IO.check_event(evo.t, 'save'):
from .disc_utils import mkdir_p
mkdir_p(output_dir)
snap_name = 'disc_{:04d}.dat'.format(IO.event_number('save'))
evo.dump_ASCII(os.path.join(output_dir, snap_name))
snap_name = 'disc_{:04d}.h5'.format(IO.event_number('save'))
evo.dump_hdf5(os.path.join(output_dir, snap_name))
if IO.check_event(evo.t, 'plot'):
err_state = np.seterr(all='warn')
print('Nstep: {}'.format(evo.num_steps))
print('Time: {} yr'.format(evo.t / (2 * np.pi)))
plt.subplot(321)
l, = plt.loglog(grid.Rc, evo.disc.Sigma_G)
plt.loglog(grid.Rc, evo.disc.Sigma_D.sum(0), '--', c=l.get_color())
plt.xlabel('$R$')
plt.ylabel('$\Sigma_\mathrm{G, D}$')
plt.subplot(322)
plt.loglog(grid.Rc, evo.disc.dust_frac.sum(0))
plt.xlabel('$R$')
plt.ylabel('$\epsilon$')
plt.subplot(323)
plt.loglog(grid.Rc, evo.disc.Stokes()[1])
plt.xlabel('$R$')
plt.ylabel('$St$')
plt.subplot(324)
plt.loglog(grid.Rc, evo.disc.grain_size[1])
plt.xlabel('$R$')
plt.ylabel('$a\,[\mathrm{cm}]$')
plt.subplot(325)
gCO = evo.disc.chem.gas.atomic_abundance()
sCO = evo.disc.chem.ice.atomic_abundance()
gCO.data[:] /= solar_abund.data
sCO.data[:] /= solar_abund.data
c = l.get_color()
plt.semilogx(grid.Rc, gCO['C'], '-', c=c, linewidth=1)
plt.semilogx(grid.Rc, gCO['O'], '-', c=c, linewidth=2)
plt.semilogx(grid.Rc, sCO['C'], ':', c=c, linewidth=1)
plt.semilogx(grid.Rc, sCO['O'], ':', c=c, linewidth=2)
            plt.xlabel('$R\,[\mathrm{au}]$')
plt.ylabel('$[X]_\mathrm{solar}$')
plt.subplot(326)
plt.semilogx(grid.Rc, gCO['C'] / gCO['O'], '-', c=c)
plt.semilogx(grid.Rc, sCO['C'] / sCO['O'], ':', c=c)
            plt.xlabel('$R\,[\mathrm{au}]$')
plt.ylabel('$[C/O]_\mathrm{solar}$')
np.seterr(**err_state)
IO.pop_events(evo.t)
if len(plot_times) > 0:
plt.show()
| gpl-3.0 | -6,679,079,293,049,471,000 | 31.976501 | 190 | 0.553286 | false |
bcoca/ansible | lib/ansible/modules/dnf.py | 1 | 53556 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015 Cristian van Ee <cristian at cvee.org>
# Copyright 2015 Igor Gnatenko <[email protected]>
# Copyright 2018 Adam Miller <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: dnf
version_added: 1.9
short_description: Manages packages with the I(dnf) package manager
description:
- Installs, upgrade, removes, and lists packages and groups with the I(dnf) package manager.
options:
name:
description:
- "A package name or package specifier with version, like C(name-1.0).
When using state=latest, this can be '*' which means run: dnf -y update.
You can also pass a url or a local path to a rpm file.
To operate on several packages this can accept a comma separated string of packages or a list of packages."
- Comparison operators for package version are valid here C(>), C(<), C(>=), C(<=). Example - C(name>=1.0)
required: true
aliases:
- pkg
type: list
elements: str
list:
description:
- Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples.
type: str
state:
description:
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
- Default is C(None), however in effect the default action is C(present) unless the C(autoremove) option is
enabled for this module, then C(absent) is inferred.
choices: ['absent', 'present', 'installed', 'removed', 'latest']
type: str
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
type: list
elements: str
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
type: list
elements: str
conf_file:
description:
- The remote dnf configuration file to use for the transaction.
type: str
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if state is I(present) or I(latest).
- This setting affects packages installed from a repository as well as
"local" packages installed from the filesystem or a URL.
type: bool
default: 'no'
installroot:
description:
- Specifies an alternative installroot, relative to which all packages
will be installed.
version_added: "2.3"
default: "/"
type: str
releasever:
description:
- Specifies an alternative release from which all packages will be
installed.
version_added: "2.6"
type: str
autoremove:
description:
- If C(yes), removes all "leaf" packages from the system that were originally
installed as dependencies of user-installed packages but which are no longer
required by any such package. Should be used alone or when state is I(absent)
type: bool
default: "no"
version_added: "2.4"
exclude:
description:
- Package name(s) to exclude when state=present, or latest. This can be a
list or a comma separated string.
version_added: "2.7"
type: list
elements: str
skip_broken:
description:
      - Skip packages with broken dependencies (depsolve) that are causing problems.
type: bool
default: "no"
version_added: "2.7"
update_cache:
description:
- Force dnf to check if cache is out of date and redownload if needed.
Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
aliases: [ expire-cache ]
version_added: "2.7"
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- Has an effect only if state is I(latest)
default: "no"
type: bool
version_added: "2.7"
security:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked security related.
- Note that, similar to ``dnf upgrade-minimal``, this filter applies to dependencies as well.
type: bool
default: "no"
version_added: "2.7"
bugfix:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked bugfix related.
- Note that, similar to ``dnf upgrade-minimal``, this filter applies to dependencies as well.
default: "no"
type: bool
version_added: "2.7"
enable_plugin:
description:
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
version_added: "2.7"
type: list
elements: str
disable_plugin:
description:
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
version_added: "2.7"
type: list
elements: str
disable_excludes:
description:
- Disable the excludes defined in DNF config files.
- If set to C(all), disables all excludes.
- If set to C(main), disable excludes defined in [main] in dnf.conf.
- If set to C(repoid), disable excludes defined for given repo id.
version_added: "2.7"
type: str
validate_certs:
description:
- This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
      - This should only be set to C(no) when used on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
type: bool
default: "yes"
version_added: "2.7"
allow_downgrade:
description:
- Specify if the named package and version is allowed to downgrade
a maybe already installed higher version of that package.
Note that setting allow_downgrade=True can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
package and others can cause changes to the packages which were
in the earlier transaction).
type: bool
default: "no"
version_added: "2.7"
install_repoquery:
description:
- This is effectively a no-op in DNF as it is not needed with DNF, but is an accepted parameter for feature
parity/compatibility with the I(yum) module.
type: bool
default: "yes"
version_added: "2.7"
download_only:
description:
- Only download the packages, do not install them.
default: "no"
type: bool
version_added: "2.7"
lock_timeout:
description:
- Amount of time to wait for the dnf lockfile to be freed.
required: false
default: 30
type: int
version_added: "2.8"
install_weak_deps:
description:
- Will also install all packages linked by a weak dependency relation.
type: bool
default: "yes"
version_added: "2.8"
download_dir:
description:
- Specifies an alternate directory to store packages.
- Has an effect only if I(download_only) is specified.
type: str
version_added: "2.8"
allowerasing:
description:
- If C(yes) it allows erasing of installed packages to resolve dependencies.
required: false
type: bool
default: "no"
version_added: "2.10"
nobest:
description:
- Set best option to False, so that transactions are not limited to best candidates only.
required: false
type: bool
default: "no"
version_added: "2.11"
cacheonly:
description:
- Tells dnf to run entirely from system cache; does not download or update metadata.
type: bool
default: "no"
version_added: "2.12"
notes:
  - When used with a `loop:` each package will be processed individually; it is much more efficient to pass the list directly to the `name` option.
- Group removal doesn't work if the group was installed with Ansible because
upstream dnf's API doesn't properly mark groups as installed, therefore upon
removal the module is unable to detect that the group is installed
(https://bugzilla.redhat.com/show_bug.cgi?id=1620324)
requirements:
- "python >= 2.6"
- python-dnf
  - for the autoremove option you need dnf >= 2.0.1
author:
- Igor Gnatenko (@ignatenkobrain) <[email protected]>
- Cristian van Ee (@DJMuggs) <cristian at cvee.org>
- Berend De Schouwer (@berenddeschouwer)
- Adam Miller (@maxamillion) <[email protected]>
'''
EXAMPLES = '''
- name: Install the latest version of Apache
dnf:
name: httpd
state: latest
- name: Install Apache >= 2.4
dnf:
name: httpd>=2.4
state: present
- name: Install the latest version of Apache and MariaDB
dnf:
name:
- httpd
- mariadb-server
state: latest
- name: Remove the Apache package
dnf:
name: httpd
state: absent
- name: Install the latest version of Apache from the testing repo
dnf:
name: httpd
enablerepo: testing
state: present
- name: Upgrade all packages
dnf:
name: "*"
state: latest
- name: Install the nginx rpm from a remote repo
dnf:
name: 'http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm'
state: present
- name: Install nginx rpm from a local file
dnf:
name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: Install the 'Development tools' package group
dnf:
name: '@Development tools'
state: present
- name: Autoremove unneeded packages installed as dependencies
dnf:
autoremove: yes
- name: Uninstall httpd but keep its dependencies
dnf:
name: httpd
state: absent
autoremove: no
- name: Install a modularity appstream with defined stream and profile
dnf:
name: '@postgresql:9.6/client'
state: present
- name: Install a modularity appstream with defined stream
dnf:
name: '@postgresql:9.6'
state: present
- name: Install a modularity appstream with defined profile
dnf:
name: '@postgresql/client'
state: present
'''
import os
import re
import sys
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import fetch_file
from ansible.module_utils.six import PY2, text_type
from ansible.module_utils.compat.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
try:
import dnf
import dnf.cli
import dnf.const
import dnf.exceptions
import dnf.subject
import dnf.util
HAS_DNF = True
except ImportError:
HAS_DNF = False
class DnfModule(YumDnf):
"""
DNF Ansible module back-end implementation
"""
def __init__(self, module):
# This populates instance vars for all argument spec params
super(DnfModule, self).__init__(module)
self._ensure_dnf()
self.lockfile = "/var/cache/dnf/*_lock.pid"
self.pkg_mgr_name = "dnf"
try:
self.with_modules = dnf.base.WITH_MODULES
except AttributeError:
self.with_modules = False
# DNF specific args that are not part of YumDnf
self.allowerasing = self.module.params['allowerasing']
self.nobest = self.module.params['nobest']
def is_lockfile_pid_valid(self):
# FIXME? it looks like DNF takes care of invalid lock files itself?
# https://github.com/ansible/ansible/issues/57189
return True
def _sanitize_dnf_error_msg_install(self, spec, error):
"""
For unhandled dnf.exceptions.Error scenarios, there are certain error
messages we want to filter in an install scenario. Do that here.
"""
if (
to_text("no package matched") in to_text(error) or
to_text("No match for argument:") in to_text(error)
):
return "No package {0} available.".format(spec)
return error
def _sanitize_dnf_error_msg_remove(self, spec, error):
"""
For unhandled dnf.exceptions.Error scenarios, there are certain error
messages we want to ignore in a removal scenario as known benign
failures. Do that here.
"""
if (
'no package matched' in to_native(error) or
'No match for argument:' in to_native(error)
):
return (False, "{0} is not installed".format(spec))
# Return value is tuple of:
# ("Is this actually a failure?", "Error Message")
return (True, error)
def _package_dict(self, package):
"""Return a dictionary of information for the package."""
# NOTE: This no longer contains the 'dnfstate' field because it is
# already known based on the query type.
result = {
'name': package.name,
'arch': package.arch,
'epoch': str(package.epoch),
'release': package.release,
'version': package.version,
'repo': package.repoid}
result['nevra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format(
**result)
if package.installtime == 0:
result['yumstate'] = 'available'
else:
result['yumstate'] = 'installed'
return result
def _packagename_dict(self, packagename):
"""
Return a dictionary of information for a package name string or None
if the package name doesn't contain at least all NVR elements
"""
if packagename[-4:] == '.rpm':
packagename = packagename[:-4]
# This list was auto generated on a Fedora 28 system with the following one-liner
# printf '[ '; for arch in $(ls /usr/lib/rpm/platform); do printf '"%s", ' ${arch%-linux}; done; printf ']\n'
redhat_rpm_arches = [
"aarch64", "alphaev56", "alphaev5", "alphaev67", "alphaev6", "alpha",
"alphapca56", "amd64", "armv3l", "armv4b", "armv4l", "armv5tejl", "armv5tel",
"armv5tl", "armv6hl", "armv6l", "armv7hl", "armv7hnl", "armv7l", "athlon",
"geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "m68k", "mips64el",
"mips64", "mips64r6el", "mips64r6", "mipsel", "mips", "mipsr6el", "mipsr6",
"noarch", "pentium3", "pentium4", "ppc32dy4", "ppc64iseries", "ppc64le", "ppc64",
"ppc64p7", "ppc64pseries", "ppc8260", "ppc8560", "ppciseries", "ppc", "ppcpseries",
"riscv64", "s390", "s390x", "sh3", "sh4a", "sh4", "sh", "sparc64", "sparc64v",
"sparc", "sparcv8", "sparcv9", "sparcv9v", "x86_64"
]
rpm_arch_re = re.compile(r'(.*)\.(.*)')
rpm_nevr_re = re.compile(r'(\S+)-(?:(\d*):)?(.*)-(~?\w+[\w.+]*)')
try:
arch = None
rpm_arch_match = rpm_arch_re.match(packagename)
if rpm_arch_match:
nevr, arch = rpm_arch_match.groups()
if arch in redhat_rpm_arches:
packagename = nevr
rpm_nevr_match = rpm_nevr_re.match(packagename)
if rpm_nevr_match:
name, epoch, version, release = rpm_nevr_re.match(packagename).groups()
if not version or not version.split('.')[0].isdigit():
return None
else:
return None
except AttributeError as e:
self.module.fail_json(
msg='Error attempting to parse package: %s, %s' % (packagename, to_native(e)),
rc=1,
results=[]
)
if not epoch:
epoch = "0"
if ':' in name:
epoch_name = name.split(":")
epoch = epoch_name[0]
name = ''.join(epoch_name[1:])
result = {
'name': name,
'epoch': epoch,
'release': release,
'version': version,
}
return result
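    # Worked example (illustrative): for the NEVRA string
    # "httpd-2.4.6-93.el7.x86_64" the helper above strips the known arch and
    # splits the rest into {'name': 'httpd', 'epoch': '0', 'version': '2.4.6',
    # 'release': '93.el7'}; a bare name like "httpd" carries no version
    # information and yields None.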
# Original implementation from yum.rpmUtils.miscutils (GPLv2+)
# http://yum.baseurl.org/gitweb?p=yum.git;a=blob;f=rpmUtils/miscutils.py
def _compare_evr(self, e1, v1, r1, e2, v2, r2):
# return 1: a is newer than b
# 0: a and b are the same version
# -1: b is newer than a
if e1 is None:
e1 = '0'
else:
e1 = str(e1)
v1 = str(v1)
r1 = str(r1)
if e2 is None:
e2 = '0'
else:
e2 = str(e2)
v2 = str(v2)
r2 = str(r2)
# print '%s, %s, %s vs %s, %s, %s' % (e1, v1, r1, e2, v2, r2)
rc = dnf.rpm.rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
# print '%s, %s, %s vs %s, %s, %s = %s' % (e1, v1, r1, e2, v2, r2, rc)
return rc
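    # Worked example (illustrative): comparing (e1, v1, r1) = ('1', '2.0', '1')
    # against ('0', '2.4', '5') returns 1 because the epoch dominates, while
    # ('0', '1.0', '1') vs ('0', '1.0', '2') returns -1 since only the release
    # differs and the second one is newer.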
def _ensure_dnf(self):
if HAS_DNF:
return
system_interpreters = ['/usr/libexec/platform-python',
'/usr/bin/python3',
'/usr/bin/python2',
'/usr/bin/python']
if not has_respawned():
# probe well-known system Python locations for accessible bindings, favoring py3
interpreter = probe_interpreters_for_module(system_interpreters, 'dnf')
if interpreter:
# respawn under the interpreter where the bindings should be found
respawn_module(interpreter)
# end of the line for this module, the process will exit here once the respawned module completes
# done all we can do, something is just broken (auto-install isn't useful anymore with respawn, so it was removed)
self.module.fail_json(
msg="Could not import the dnf python module using {0} ({1}). "
"Please install `python3-dnf` or `python2-dnf` package or ensure you have specified the "
"correct ansible_python_interpreter. (attempted {2})"
.format(sys.executable, sys.version.replace('\n', ''), system_interpreters),
results=[]
)
def _configure_base(self, base, conf_file, disable_gpg_check, installroot='/'):
"""Configure the dnf Base object."""
conf = base.conf
# Change the configuration file path if provided, this must be done before conf.read() is called
if conf_file:
# Fail if we can't read the configuration file.
if not os.access(conf_file, os.R_OK):
self.module.fail_json(
msg="cannot read configuration file", conf_file=conf_file,
results=[],
)
else:
conf.config_file_path = conf_file
# Read the configuration file
conf.read()
# Turn off debug messages in the output
conf.debuglevel = 0
# Set whether to check gpg signatures
conf.gpgcheck = not disable_gpg_check
conf.localpkg_gpgcheck = not disable_gpg_check
# Don't prompt for user confirmations
conf.assumeyes = True
# Set installroot
conf.installroot = installroot
# Load substitutions from the filesystem
conf.substitutions.update_from_etc(installroot)
# Handle different DNF versions immutable mutable datatypes and
# dnf v1/v2/v3
#
# In DNF < 3.0 are lists, and modifying them works
# In DNF >= 3.0 < 3.6 are lists, but modifying them doesn't work
# In DNF >= 3.6 have been turned into tuples, to communicate that modifying them doesn't work
#
# https://www.happyassassin.net/2018/06/27/adams-debugging-adventures-the-immutable-mutable-object/
#
# Set excludes
if self.exclude:
_excludes = list(conf.exclude)
_excludes.extend(self.exclude)
conf.exclude = _excludes
# Set disable_excludes
if self.disable_excludes:
_disable_excludes = list(conf.disable_excludes)
if self.disable_excludes not in _disable_excludes:
_disable_excludes.append(self.disable_excludes)
conf.disable_excludes = _disable_excludes
# Set releasever
if self.releasever is not None:
conf.substitutions['releasever'] = self.releasever
# Set skip_broken (in dnf this is strict=0)
if self.skip_broken:
conf.strict = 0
# Set best
if self.nobest:
conf.best = 0
if self.download_only:
conf.downloadonly = True
if self.download_dir:
conf.destdir = self.download_dir
if self.cacheonly:
conf.cacheonly = True
# Default in dnf upstream is true
conf.clean_requirements_on_remove = self.autoremove
# Default in dnf (and module default) is True
conf.install_weak_deps = self.install_weak_deps
def _specify_repositories(self, base, disablerepo, enablerepo):
"""Enable and disable repositories matching the provided patterns."""
base.read_all_repos()
repos = base.repos
# Disable repositories
for repo_pattern in disablerepo:
if repo_pattern:
for repo in repos.get_matching(repo_pattern):
repo.disable()
# Enable repositories
for repo_pattern in enablerepo:
if repo_pattern:
for repo in repos.get_matching(repo_pattern):
repo.enable()
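    # --- Illustrative editor's sketch (not part of the upstream module) ---
    # repos.get_matching() accepts glob patterns, so enablerepo/disablerepo entries such
    # as "epel*" toggle every matching repository. A hedged usage example (the repository
    # pattern is just an example):
    #
    #   base.read_all_repos()
    #   for repo in base.repos.get_matching('epel*'):
    #       repo.disable()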
def _base(self, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot):
"""Return a fully configured dnf Base object."""
base = dnf.Base()
self._configure_base(base, conf_file, disable_gpg_check, installroot)
try:
            # this method is only available in dnf-4.2.17-6 or later
# https://bugzilla.redhat.com/show_bug.cgi?id=1788212
base.setup_loggers()
except AttributeError:
pass
try:
base.init_plugins(set(self.disable_plugin), set(self.enable_plugin))
base.pre_configure_plugins()
except AttributeError:
pass # older versions of dnf didn't require this and don't have these methods
self._specify_repositories(base, disablerepo, enablerepo)
try:
base.configure_plugins()
except AttributeError:
pass # older versions of dnf didn't require this and don't have these methods
try:
if self.update_cache:
try:
base.update_cache()
except dnf.exceptions.RepoError as e:
self.module.fail_json(
msg="{0}".format(to_text(e)),
results=[],
rc=1
)
base.fill_sack(load_system_repo='auto')
except dnf.exceptions.RepoError as e:
self.module.fail_json(
msg="{0}".format(to_text(e)),
results=[],
rc=1
)
filters = []
if self.bugfix:
key = {'advisory_type__eq': 'bugfix'}
filters.append(base.sack.query().upgrades().filter(**key))
if self.security:
key = {'advisory_type__eq': 'security'}
filters.append(base.sack.query().upgrades().filter(**key))
if filters:
base._update_security_filters = filters
return base
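    # --- Illustrative editor's sketch (not part of the upstream module) ---
    # The bugfix/security handling above builds hawkey queries limited by advisory type
    # and stashes them on the Base object so the resolver only considers those upgrades.
    # A hedged sketch of listing security upgrades directly, assuming a configured and
    # filled `base` as returned by _base() (the helper name is hypothetical):
    #
    #   def security_upgrade_names(base):
    #       query = base.sack.query().upgrades().filter(advisory_type__eq='security')
    #       return sorted(set(package.name for package in query.run()))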
def list_items(self, command):
"""List package info based on the command."""
# Rename updates to upgrades
if command == 'updates':
command = 'upgrades'
# Return the corresponding packages
if command in ['installed', 'upgrades', 'available']:
results = [
self._package_dict(package)
for package in getattr(self.base.sack.query(), command)()]
# Return the enabled repository ids
elif command in ['repos', 'repositories']:
results = [
{'repoid': repo.id, 'state': 'enabled'}
for repo in self.base.repos.iter_enabled()]
# Return any matching packages
else:
packages = dnf.subject.Subject(command).get_best_query(self.base.sack)
results = [self._package_dict(package) for package in packages]
self.module.exit_json(msg="", results=results)
def _is_installed(self, pkg):
installed = self.base.sack.query().installed()
if installed.filter(name=pkg):
return True
else:
return False
def _is_newer_version_installed(self, pkg_name):
candidate_pkg = self._packagename_dict(pkg_name)
if not candidate_pkg:
# The user didn't provide a versioned rpm, so version checking is
# not required
return False
installed = self.base.sack.query().installed()
installed_pkg = installed.filter(name=candidate_pkg['name']).run()
if installed_pkg:
installed_pkg = installed_pkg[0]
# this looks weird but one is a dict and the other is a dnf.Package
evr_cmp = self._compare_evr(
installed_pkg.epoch, installed_pkg.version, installed_pkg.release,
candidate_pkg['epoch'], candidate_pkg['version'], candidate_pkg['release'],
)
if evr_cmp == 1:
return True
else:
return False
else:
return False
def _mark_package_install(self, pkg_spec, upgrade=False):
"""Mark the package for install."""
is_newer_version_installed = self._is_newer_version_installed(pkg_spec)
is_installed = self._is_installed(pkg_spec)
try:
if is_newer_version_installed:
if self.allow_downgrade:
                    # dnf only exposes allow_downgrade, so we have to handle this ourselves
                    # because it opens up the possibility of non-idempotent transactions
                    # on a system's package set (provided the repo has many old
                    # NVRs indexed)
if upgrade:
if is_installed:
self.base.upgrade(pkg_spec)
else:
self.base.install(pkg_spec)
else:
self.base.install(pkg_spec)
else: # Nothing to do, report back
pass
            elif is_installed:  # A potentially older (or same) version is installed
if upgrade:
self.base.upgrade(pkg_spec)
else: # Nothing to do, report back
pass
else: # The package is not installed, simply install it
self.base.install(pkg_spec)
return {'failed': False, 'msg': '', 'failure': '', 'rc': 0}
except dnf.exceptions.MarkingError as e:
return {
'failed': True,
'msg': "No package {0} available.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
except dnf.exceptions.DepsolveError as e:
return {
'failed': True,
'msg': "Depsolve Error occurred for package {0}.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
except dnf.exceptions.Error as e:
if to_text("already installed") in to_text(e):
return {'failed': False, 'msg': '', 'failure': ''}
else:
return {
'failed': True,
'msg': "Unknown Error occurred for package {0}.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
def _whatprovides(self, filepath):
available = self.base.sack.query().available()
pkg_spec = available.filter(provides=filepath).run()
if pkg_spec:
return pkg_spec[0].name
def _parse_spec_group_file(self):
pkg_specs, grp_specs, module_specs, filenames = [], [], [], []
already_loaded_comps = False # Only load this if necessary, it's slow
for name in self.names:
if '://' in name:
name = fetch_file(self.module, name)
filenames.append(name)
elif name.endswith(".rpm"):
filenames.append(name)
elif name.startswith("@") or ('/' in name):
# like "dnf install /usr/bin/vi"
if '/' in name:
pkg_spec = self._whatprovides(name)
if pkg_spec:
pkg_specs.append(pkg_spec)
continue
if not already_loaded_comps:
self.base.read_comps()
already_loaded_comps = True
grp_env_mdl_candidate = name[1:].strip()
if self.with_modules:
mdl = self.module_base._get_modules(grp_env_mdl_candidate)
if mdl[0]:
module_specs.append(grp_env_mdl_candidate)
else:
grp_specs.append(grp_env_mdl_candidate)
else:
grp_specs.append(grp_env_mdl_candidate)
else:
pkg_specs.append(name)
return pkg_specs, grp_specs, module_specs, filenames
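    # --- Illustrative editor's sketch (not part of the upstream module) ---
    # The parser above sorts each requested name into one of four buckets: URLs and
    # *.rpm paths become filenames, "@name" becomes a group/environment/module spec,
    # plain paths go through a provides lookup, and everything else is a package spec.
    # A condensed sketch of that classification (the helper name is hypothetical):
    #
    #   def classify(name):
    #       if '://' in name or name.endswith('.rpm'):
    #           return 'filename'
    #       if name.startswith('@'):
    #           return 'group, environment or module spec'
    #       if '/' in name:
    #           return 'file path resolved via provides lookup'
    #       return 'package spec'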
def _update_only(self, pkgs):
not_installed = []
for pkg in pkgs:
if self._is_installed(pkg):
try:
if isinstance(to_text(pkg), text_type):
self.base.upgrade(pkg)
else:
self.base.package_upgrade(pkg)
except Exception as e:
self.module.fail_json(
msg="Error occurred attempting update_only operation: {0}".format(to_native(e)),
results=[],
rc=1,
)
else:
not_installed.append(pkg)
return not_installed
def _install_remote_rpms(self, filenames):
if int(dnf.__version__.split(".")[0]) >= 2:
pkgs = list(sorted(self.base.add_remote_rpms(list(filenames)), reverse=True))
else:
pkgs = []
try:
for filename in filenames:
pkgs.append(self.base.add_remote_rpm(filename))
except IOError as e:
if to_text("Can not load RPM file") in to_text(e):
self.module.fail_json(
msg="Error occurred attempting remote rpm install of package: {0}. {1}".format(filename, to_native(e)),
results=[],
rc=1,
)
if self.update_only:
self._update_only(pkgs)
else:
for pkg in pkgs:
try:
if self._is_newer_version_installed(self._package_dict(pkg)['nevra']):
if self.allow_downgrade:
self.base.package_install(pkg)
else:
self.base.package_install(pkg)
except Exception as e:
self.module.fail_json(
msg="Error occurred attempting remote rpm operation: {0}".format(to_native(e)),
results=[],
rc=1,
)
def _is_module_installed(self, module_spec):
if self.with_modules:
module_spec = module_spec.strip()
module_list, nsv = self.module_base._get_modules(module_spec)
enabled_streams = self.base._moduleContainer.getEnabledStream(nsv.name)
if enabled_streams:
if nsv.stream:
if nsv.stream in enabled_streams:
return True # The provided stream was found
else:
return False # The provided stream was not found
else:
return True # No stream provided, but module found
return False # seems like a sane default
def ensure(self):
response = {
'msg': "",
'changed': False,
'results': [],
'rc': 0
}
# Accumulate failures. Package management modules install what they can
# and fail with a message about what they can't.
failure_response = {
'msg': "",
'failures': [],
'results': [],
'rc': 1
}
# Autoremove is called alone
# Jump to remove path where base.autoremove() is run
if not self.names and self.autoremove:
self.names = []
self.state = 'absent'
if self.names == ['*'] and self.state == 'latest':
try:
self.base.upgrade_all()
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to upgrade all packages"
self.module.fail_json(**failure_response)
else:
pkg_specs, group_specs, module_specs, filenames = self._parse_spec_group_file()
pkg_specs = [p.strip() for p in pkg_specs]
filenames = [f.strip() for f in filenames]
groups = []
environments = []
for group_spec in (g.strip() for g in group_specs):
group = self.base.comps.group_by_pattern(group_spec)
if group:
groups.append(group.id)
else:
environment = self.base.comps.environment_by_pattern(group_spec)
if environment:
environments.append(environment.id)
else:
self.module.fail_json(
msg="No group {0} available.".format(group_spec),
results=[],
)
if self.state in ['installed', 'present']:
# Install files.
self._install_remote_rpms(filenames)
for filename in filenames:
response['results'].append("Installed {0}".format(filename))
# Install modules
if module_specs and self.with_modules:
for module in module_specs:
try:
if not self._is_module_installed(module):
response['results'].append("Module {0} installed.".format(module))
self.module_base.install([module])
self.module_base.enable([module])
except dnf.exceptions.MarkingErrors as e:
failure_response['failures'].append(' '.join((module, to_native(e))))
# Install groups.
for group in groups:
try:
group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
if group_pkg_count_installed == 0:
response['results'].append("Group {0} already installed.".format(group))
else:
response['results'].append("Group {0} installed.".format(group))
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to install group: {0}".format(group)
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
# In dnf 2.0 if all the mandatory packages in a group do
# not install, an error is raised. We want to capture
# this but still install as much as possible.
failure_response['failures'].append(" ".join((group, to_native(e))))
for environment in environments:
try:
self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((environment, to_native(e))))
if module_specs and not self.with_modules:
# This means that the group or env wasn't found in comps
self.module.fail_json(
msg="No group {0} available.".format(module_specs[0]),
results=[],
)
# Install packages.
if self.update_only:
not_installed = self._update_only(pkg_specs)
for spec in not_installed:
response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
else:
for pkg_spec in pkg_specs:
install_result = self._mark_package_install(pkg_spec)
if install_result['failed']:
if install_result['msg']:
failure_response['msg'] += install_result['msg']
failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
else:
if install_result['msg']:
response['results'].append(install_result['msg'])
elif self.state == 'latest':
# "latest" is same as "installed" for filenames.
self._install_remote_rpms(filenames)
for filename in filenames:
response['results'].append("Installed {0}".format(filename))
# Upgrade modules
if module_specs and self.with_modules:
for module in module_specs:
try:
if self._is_module_installed(module):
response['results'].append("Module {0} upgraded.".format(module))
self.module_base.upgrade([module])
except dnf.exceptions.MarkingErrors as e:
failure_response['failures'].append(' '.join((module, to_native(e))))
for group in groups:
try:
try:
self.base.group_upgrade(group)
response['results'].append("Group {0} upgraded.".format(group))
except dnf.exceptions.CompsError:
if not self.update_only:
# If not already installed, try to install.
group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
if group_pkg_count_installed == 0:
response['results'].append("Group {0} already installed.".format(group))
else:
response['results'].append("Group {0} installed.".format(group))
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((group, to_native(e))))
for environment in environments:
try:
try:
self.base.environment_upgrade(environment)
except dnf.exceptions.CompsError:
# If not already installed, try to install.
self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((environment, to_native(e))))
if self.update_only:
not_installed = self._update_only(pkg_specs)
for spec in not_installed:
response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
else:
for pkg_spec in pkg_specs:
                    # setting best causes the latest available package to be installed
                    # even if it was not previously installed
self.base.conf.best = True
install_result = self._mark_package_install(pkg_spec, upgrade=True)
if install_result['failed']:
if install_result['msg']:
failure_response['msg'] += install_result['msg']
failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
else:
if install_result['msg']:
response['results'].append(install_result['msg'])
else:
# state == absent
if filenames:
self.module.fail_json(
msg="Cannot remove paths -- please specify package name.",
results=[],
)
# Remove modules
if module_specs and self.with_modules:
for module in module_specs:
try:
if self._is_module_installed(module):
response['results'].append("Module {0} removed.".format(module))
self.module_base.remove([module])
self.module_base.disable([module])
self.module_base.reset([module])
except dnf.exceptions.MarkingErrors as e:
failure_response['failures'].append(' '.join((module, to_native(e))))
for group in groups:
try:
self.base.group_remove(group)
except dnf.exceptions.CompsError:
# Group is already uninstalled.
pass
except AttributeError:
# Group either isn't installed or wasn't marked installed at install time
# because of DNF bug
#
# This is necessary until the upstream dnf API bug is fixed where installing
# a group via the dnf API doesn't actually mark the group as installed
# https://bugzilla.redhat.com/show_bug.cgi?id=1620324
pass
for environment in environments:
try:
self.base.environment_remove(environment)
except dnf.exceptions.CompsError:
# Environment is already uninstalled.
pass
installed = self.base.sack.query().installed()
for pkg_spec in pkg_specs:
# short-circuit installed check for wildcard matching
if '*' in pkg_spec:
try:
self.base.remove(pkg_spec)
except dnf.exceptions.MarkingError as e:
is_failure, handled_remove_error = self._sanitize_dnf_error_msg_remove(pkg_spec, to_native(e))
if is_failure:
failure_response['failures'].append('{0} - {1}'.format(pkg_spec, to_native(e)))
else:
response['results'].append(handled_remove_error)
continue
installed_pkg = dnf.subject.Subject(pkg_spec).get_best_query(
sack=self.base.sack).installed().run()
for pkg in installed_pkg:
self.base.remove(str(pkg))
# Like the dnf CLI we want to allow recursive removal of dependent
# packages
self.allowerasing = True
if self.autoremove:
self.base.autoremove()
try:
if not self.base.resolve(allow_erasing=self.allowerasing):
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
response['msg'] = "Nothing to do"
self.module.exit_json(**response)
else:
response['changed'] = True
# If packages got installed/removed, add them to the results.
# We do this early so we can use it for both check_mode and not.
if self.download_only:
install_action = 'Downloaded'
else:
install_action = 'Installed'
for package in self.base.transaction.install_set:
response['results'].append("{0}: {1}".format(install_action, package))
for package in self.base.transaction.remove_set:
response['results'].append("Removed: {0}".format(package))
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
if self.module.check_mode:
response['msg'] = "Check mode: No changes made, but would have if not in check mode"
self.module.exit_json(**response)
try:
if self.download_only and self.download_dir and self.base.conf.destdir:
dnf.util.ensure_dir(self.base.conf.destdir)
self.base.repos.all().pkgdir = self.base.conf.destdir
self.base.download_packages(self.base.transaction.install_set)
except dnf.exceptions.DownloadError as e:
self.module.fail_json(
msg="Failed to download packages: {0}".format(to_text(e)),
results=[],
)
# Validate GPG. This is NOT done in dnf.Base (it's done in the
# upstream CLI subclass of dnf.Base)
if not self.disable_gpg_check:
for package in self.base.transaction.install_set:
fail = False
gpgres, gpgerr = self.base._sig_check_pkg(package)
if gpgres == 0: # validated successfully
continue
elif gpgres == 1: # validation failed, install cert?
try:
self.base._get_key_for_package(package)
except dnf.exceptions.Error as e:
fail = True
else: # fatal error
fail = True
if fail:
msg = 'Failed to validate GPG signature for {0}'.format(package)
                            self.module.fail_json(msg=msg)
if self.download_only:
# No further work left to do, and the results were already updated above.
# Just return them.
self.module.exit_json(**response)
else:
self.base.do_transaction()
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.exit_json(**response)
self.module.exit_json(**response)
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
if to_text("already installed") in to_text(e):
response['changed'] = False
response['results'].append("Package already installed: {0}".format(to_native(e)))
self.module.exit_json(**response)
else:
failure_response['msg'] = "Unknown Error occurred: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
@staticmethod
def has_dnf():
return HAS_DNF
def run(self):
"""The main function."""
# Check if autoremove is called correctly
if self.autoremove:
if LooseVersion(dnf.__version__) < LooseVersion('2.0.1'):
self.module.fail_json(
msg="Autoremove requires dnf>=2.0.1. Current dnf version is %s" % dnf.__version__,
results=[],
)
# Check if download_dir is called correctly
if self.download_dir:
if LooseVersion(dnf.__version__) < LooseVersion('2.6.2'):
self.module.fail_json(
msg="download_dir requires dnf>=2.6.2. Current dnf version is %s" % dnf.__version__,
results=[],
)
if self.update_cache and not self.names and not self.list:
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot
)
self.module.exit_json(
msg="Cache updated",
changed=False,
results=[],
rc=0
)
# Set state as installed by default
# This is not set in AnsibleModule() because the following shouldn't happen
# - dnf: autoremove=yes state=installed
if self.state is None:
self.state = 'installed'
if self.list:
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot
)
self.list_items(self.list)
else:
# Note: base takes a long time to run so we want to check for failure
# before running it.
if not dnf.util.am_i_root():
self.module.fail_json(
msg="This command has to be run under the root user.",
results=[],
)
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot
)
if self.with_modules:
self.module_base = dnf.module.module_base.ModuleBase(self.base)
self.ensure()
def main():
# state=installed name=pkgspec
# state=removed name=pkgspec
# state=latest name=pkgspec
#
# informational commands:
# list=installed
# list=updates
# list=available
# list=repos
# list=pkgspec
# Extend yumdnf_argument_spec with dnf-specific features that will never be
# backported to yum because yum is now in "maintenance mode" upstream
yumdnf_argument_spec['argument_spec']['allowerasing'] = dict(default=False, type='bool')
yumdnf_argument_spec['argument_spec']['nobest'] = dict(default=False, type='bool')
module = AnsibleModule(
**yumdnf_argument_spec
)
module_implementation = DnfModule(module)
try:
module_implementation.run()
except dnf.exceptions.RepoError as de:
module.fail_json(
msg="Failed to synchronize repodata: {0}".format(to_native(de)),
rc=1,
results=[],
changed=False
)
if __name__ == '__main__':
main()
| gpl-3.0 | -6,126,044,989,395,313,000 | 38.495575 | 156 | 0.546064 | false |
ppietrasa/grpc | tools/distrib/check_copyright.py | 1 | 5538 | #!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import datetime
import os
import re
import sys
import subprocess
# find our home
ROOT = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(ROOT)
# parse command line
argp = argparse.ArgumentParser(description='copyright checker')
argp.add_argument('-o', '--output',
default='details',
choices=['list', 'details'])
argp.add_argument('-s', '--skips',
default=0,
action='store_const',
const=1)
argp.add_argument('-a', '--ancient',
default=0,
action='store_const',
const=1)
argp.add_argument('-f', '--fix',
default=False,
                  action='store_true')
argp.add_argument('--precommit',
default=False,
action='store_true')
args = argp.parse_args()
# open the license text
with open('LICENSE') as f:
LICENSE = f.read().splitlines()
# license format by file extension
# key is the file extension, value is a format string
# that given a line of license text, returns what should
# be in the file
LICENSE_PREFIX = {
'.bat': r'@rem\s*',
'.c': r'\s*(?://|\*)\s*',
'.cc': r'\s*(?://|\*)\s*',
'.h': r'\s*(?://|\*)\s*',
'.m': r'\s*\*\s*',
'.php': r'\s*\*\s*',
'.js': r'\s*\*\s*',
'.py': r'#\s*',
'.pyx': r'#\s*',
'.pxd': r'#\s*',
'.pxi': r'#\s*',
'.rb': r'#\s*',
'.sh': r'#\s*',
'.proto': r'//\s*',
'.cs': r'//\s*',
'.mak': r'#\s*',
'Makefile': r'#\s*',
'Dockerfile': r'#\s*',
'LICENSE': '',
}
_EXEMPT = frozenset((
# Generated protocol compiler output.
'examples/python/helloworld/helloworld_pb2.py',
'examples/python/helloworld/helloworld_pb2_grpc.py',
'examples/python/multiplex/helloworld_pb2.py',
'examples/python/multiplex/helloworld_pb2_grpc.py',
'examples/python/multiplex/route_guide_pb2.py',
'examples/python/multiplex/route_guide_pb2_grpc.py',
'examples/python/route_guide/route_guide_pb2.py',
'examples/python/route_guide/route_guide_pb2_grpc.py',
# An older file originally from outside gRPC.
'src/php/tests/bootstrap.php',
))
RE_YEAR = r'Copyright (?P<first_year>[0-9]+\-)?(?P<last_year>[0-9]+), Google Inc\.'
RE_LICENSE = dict(
(k, r'\n'.join(
LICENSE_PREFIX[k] +
(RE_YEAR if re.search(RE_YEAR, line) else re.escape(line))
for line in LICENSE))
for k, v in LICENSE_PREFIX.iteritems())
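# Editor's note (illustrative sketch, not part of this script): RE_LICENSE maps each
# known extension (or basename) to a regex matching the full license text with that
# extension's comment prefix in front of every line, with the copyright year(s) left
# variable via RE_YEAR. Checking a single file then reduces to something like the
# following (the helper name `has_license` is hypothetical):
#
#   def has_license(filename, text):
#       key = os.path.splitext(filename)[1] or os.path.basename(filename)
#       return key in RE_LICENSE and re.search(RE_LICENSE[key], text) is not None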
if args.precommit:
FILE_LIST_COMMAND = 'git status -z | grep -Poz \'(?<=^[MARC][MARCD ] )[^\s]+\''
else:
FILE_LIST_COMMAND = 'git ls-tree -r --name-only -r HEAD | grep -v ^third_party/'
def load(name):
with open(name) as f:
return f.read()
def save(name, text):
with open(name, 'w') as f:
f.write(text)
assert(re.search(RE_LICENSE['LICENSE'], load('LICENSE')))
assert(re.search(RE_LICENSE['Makefile'], load('Makefile')))
def log(cond, why, filename):
if not cond: return
if args.output == 'details':
print '%s: %s' % (why, filename)
else:
print filename
# scan files, validate the text
ok = True
filename_list = []
try:
filename_list = subprocess.check_output(FILE_LIST_COMMAND,
shell=True).splitlines()
except subprocess.CalledProcessError:
sys.exit(0)
for filename in filename_list:
if filename in _EXEMPT:
continue
ext = os.path.splitext(filename)[1]
base = os.path.basename(filename)
if ext in RE_LICENSE:
re_license = RE_LICENSE[ext]
elif base in RE_LICENSE:
re_license = RE_LICENSE[base]
else:
log(args.skips, 'skip', filename)
continue
try:
text = load(filename)
except:
continue
m = re.search(re_license, text)
if m:
pass
elif 'DO NOT EDIT' not in text and filename != 'src/boringssl/err_data.c':
log(1, 'copyright missing', filename)
ok = False
sys.exit(0 if ok else 1)
| bsd-3-clause | 5,643,336,900,783,706,000 | 30.465909 | 83 | 0.631636 | false |
foosel/OctoPrint | src/octoprint/access/users.py | 1 | 40260 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
from flask_login import UserMixin, AnonymousUserMixin
from werkzeug.local import LocalProxy
import hashlib
import io
import os
import yaml
import uuid
import wrapt
import time
import logging
# noinspection PyCompatibility
from builtins import range, bytes
from octoprint.settings import settings as s
from octoprint.util import atomic_write, to_bytes, deprecated, monotonic_time, generate_api_key
from octoprint.util import get_fully_qualified_classname as fqcn
from octoprint.access.permissions import Permissions, OctoPrintPermission
from octoprint.access.groups import GroupChangeListener, Group
from past.builtins import basestring
class UserManager(GroupChangeListener, object):
def __init__(self, group_manager, settings=None):
self._group_manager = group_manager
self._group_manager.register_listener(self)
self._logger = logging.getLogger(__name__)
self._session_users_by_session = dict()
self._sessionids_by_userid = dict()
self._enabled = True
if settings is None:
settings = s()
self._settings = settings
self._login_status_listeners = []
def register_login_status_listener(self, listener):
self._login_status_listeners.append(listener)
def unregister_login_status_listener(self, listener):
self._login_status_listeners.remove(listener)
def anonymous_user_factory(self):
if self.enabled:
return AnonymousUser([self._group_manager.guest_group])
else:
return AdminUser([self._group_manager.admin_group, self._group_manager.user_group])
def api_user_factory(self):
return ApiUser([self._group_manager.admin_group, self._group_manager.user_group])
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, value):
self._enabled = value
def enable(self):
self._enabled = True
def disable(self):
self._enabled = False
def login_user(self, user):
self._cleanup_sessions()
if user is None or user.is_anonymous:
return
if isinstance(user, LocalProxy):
# noinspection PyProtectedMember
user = user._get_current_object()
if not isinstance(user, User):
return None
if not isinstance(user, SessionUser):
user = SessionUser(user)
self._session_users_by_session[user.session] = user
userid = user.get_id()
if not userid in self._sessionids_by_userid:
self._sessionids_by_userid[userid] = set()
self._sessionids_by_userid[userid].add(user.session)
for listener in self._login_status_listeners:
try:
listener.on_user_logged_in(user)
except Exception:
self._logger.exception("Error in on_user_logged_in on {!r}".format(listener),
extra=dict(callback=fqcn(listener)))
self._logger.info("Logged in user: {}".format(user.get_id()))
return user
def logout_user(self, user, stale=False):
if user is None or user.is_anonymous or isinstance(user, AdminUser):
return
if isinstance(user, LocalProxy):
user = user._get_current_object()
if not isinstance(user, SessionUser):
return
userid = user.get_id()
sessionid = user.session
if userid in self._sessionids_by_userid:
try:
self._sessionids_by_userid[userid].remove(sessionid)
except KeyError:
pass
if sessionid in self._session_users_by_session:
try:
del self._session_users_by_session[sessionid]
except KeyError:
pass
for listener in self._login_status_listeners:
try:
listener.on_user_logged_out(user, stale=stale)
except Exception:
self._logger.exception("Error in on_user_logged_out on {!r}".format(listener),
extra=dict(callback=fqcn(listener)))
self._logger.info("Logged out user: {}".format(user.get_id()))
def _cleanup_sessions(self):
for session, user in list(self._session_users_by_session.items()):
if not isinstance(user, SessionUser):
continue
if user.created + (24 * 60 * 60) < monotonic_time():
self._logger.info("Cleaning up user session {} for user {}".format(session, user.get_id()))
self.logout_user(user, stale=True)
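    # --- Illustrative editor's note (not part of OctoPrint) ---
    # Session users are considered stale 24 hours after creation and are logged out
    # with stale=True on the next login attempt. The cutoff test above is equivalent
    # to this sketch (the names SESSION_MAX_AGE and is_stale are hypothetical):
    #
    #   SESSION_MAX_AGE = 24 * 60 * 60  # seconds
    #
    #   def is_stale(session_user, now):
    #       return now - session_user.created > SESSION_MAX_AGE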
@staticmethod
def create_password_hash(password, salt=None, settings=None):
if not salt:
if settings is None:
settings = s()
salt = settings.get(["accessControl", "salt"])
if salt is None:
import string
from random import choice
chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
salt = "".join(choice(chars) for _ in range(32))
settings.set(["accessControl", "salt"], salt)
settings.save()
return hashlib.sha512(to_bytes(password, encoding="utf-8", errors="replace") + to_bytes(salt)).hexdigest()
def check_password(self, username, password):
user = self.find_user(username)
if not user:
return False
hash = UserManager.create_password_hash(password,
settings=self._settings)
if user.check_password(hash):
# new hash matches, correct password
return True
else:
# new hash doesn't match, but maybe the old one does, so check that!
oldHash = UserManager.create_password_hash(password,
salt="mvBUTvwzBzD3yPwvnJ4E4tXNf3CGJvvW",
settings=self._settings)
if user.check_password(oldHash):
# old hash matches, we migrate the stored password hash to the new one and return True since it's the correct password
self.change_user_password(username, password)
return True
else:
# old hash doesn't match either, wrong password
return False
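    # --- Illustrative editor's note (not part of OctoPrint) ---
    # create_password_hash is a salted SHA-512 over the UTF-8 encoded password plus the
    # instance-wide salt persisted in the settings; check_password recomputes that hash
    # and also tries a legacy fixed-salt hash, transparently migrating old entries on a
    # successful match. A standalone sketch of the hashing scheme (helper name and salt
    # value are just examples):
    #
    #   import hashlib
    #
    #   def hash_password(password, salt):
    #       return hashlib.sha512(password.encode("utf-8") + salt.encode("utf-8")).hexdigest()
    #
    #   stored = hash_password("secret", "Jb3zK1x9")
    #   assert hash_password("secret", "Jb3zK1x9") == stored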
def add_user(self, username, password, active, permissions, groups, overwrite=False):
pass
def change_user_activation(self, username, active):
pass
def change_user_permissions(self, username, permissions):
pass
def add_permissions_to_user(self, username, permissions):
pass
def remove_permissions_from_user(self, username, permissions):
pass
def change_user_groups(self, username, groups):
pass
def add_groups_to_user(self, username, groups):
pass
def remove_groups_from_user(self, username, groups):
pass
def remove_groups_from_users(self, group):
pass
def change_user_password(self, username, password):
pass
def get_user_setting(self, username, key):
return None
def get_all_user_settings(self, username):
return dict()
def change_user_setting(self, username, key, value):
pass
def change_user_settings(self, username, new_settings):
pass
def remove_user(self, username):
if username in self._sessionids_by_userid:
sessions = self._sessionids_by_userid[username]
for session in sessions:
if session in self._session_users_by_session:
del self._session_users_by_session[session]
del self._sessionids_by_userid[username]
def find_user(self, userid=None, session=None):
if session is not None and session in self._session_users_by_session:
user = self._session_users_by_session[session]
if userid is None or userid == user.get_id():
return user
return None
def find_sessions_for(self, matcher):
result = []
for user in self.get_all_users():
if matcher(user):
try:
session_ids = self._sessionids_by_userid[user.get_id()]
for session_id in session_ids:
try:
result.append(self._session_users_by_session[session_id])
except KeyError:
# unknown session after all
continue
except KeyError:
# no session for user
pass
return result
def get_all_users(self):
return []
def has_been_customized(self):
return False
def on_group_removed(self, group):
self._logger.debug("Group {} got removed, removing from all users".format(group.key))
self.remove_groups_from_users([group])
def on_group_permissions_changed(self, group, added=None, removed=None):
users = self.find_sessions_for(lambda u: group in u.groups)
for listener in self._login_status_listeners:
try:
for user in users:
listener.on_user_modified(user)
except Exception:
self._logger.exception("Error in on_user_modified on {!r}".format(listener),
extra=dict(callback=fqcn(listener)))
def on_group_subgroups_changed(self, group, added=None, removed=None):
users = self.find_sessions_for(lambda u: group in u.groups)
for listener in self._login_status_listeners:
# noinspection PyBroadException
try:
for user in users:
listener.on_user_modified(user)
except Exception:
self._logger.exception("Error in on_user_modified on {!r}".format(listener),
extra=dict(callback=fqcn(listener)))
def _trigger_on_user_modified(self, user):
if isinstance(user, basestring):
# user id
users = []
try:
session_ids = self._sessionids_by_userid[user]
for session_id in session_ids:
try:
users.append(self._session_users_by_session[session_id])
except KeyError:
# unknown session id
continue
except KeyError:
# no session for user
return
elif isinstance(user, User) and not isinstance(user, SessionUser):
users = self.find_sessions_for(lambda u: u.get_id() == user.get_id())
elif isinstance(user, User):
users = [user]
else:
return
for listener in self._login_status_listeners:
try:
for user in users:
listener.on_user_modified(user)
except Exception:
self._logger.exception("Error in on_user_modified on {!r}".format(listener),
extra=dict(callback=fqcn(listener)))
def _trigger_on_user_removed(self, username):
for listener in self._login_status_listeners:
try:
listener.on_user_removed(username)
except Exception:
self._logger.exception("Error in on_user_removed on {!r}".format(listener),
extra=dict(callback=fqcn(listener)))
#~~ Deprecated methods follow
# TODO: Remove deprecated methods in OctoPrint 1.5.0
@staticmethod
def createPasswordHash(*args, **kwargs):
"""
.. deprecated: 1.4.0
Replaced by :func:`~UserManager.create_password_hash`
"""
# we can't use the deprecated decorator here since this method is static
import warnings
warnings.warn("createPasswordHash has been renamed to create_password_hash", DeprecationWarning, stacklevel=2)
return UserManager.create_password_hash(*args, **kwargs)
@deprecated("changeUserRoles has been replaced by change_user_permissions",
includedoc="Replaced by :func:`change_user_permissions`",
since="1.4.0")
def changeUserRoles(self, username, roles):
user = self.find_user(username)
if user is None:
raise UnknownUser(username)
removed_roles = set(user._roles) - set(roles)
self.removeRolesFromUser(username, removed_roles, user=user)
added_roles = set(roles) - set(user._roles)
self.addRolesToUser(username, added_roles, user=user)
@deprecated("addRolesToUser has been replaced by add_permissions_to_user",
includedoc="Replaced by :func:`add_permissions_to_user`",
since="1.4.0")
def addRolesToUser(self, username, roles, user=None):
if user is None:
user = self.find_user(username)
if user is None:
raise UnknownUser(username)
if "admin" in roles:
self.add_groups_to_user(username, self._group_manager.admin_group)
if "user" in roles:
self.remove_groups_from_user(username, self._group_manager.user_group)
@deprecated("removeRolesFromUser has been replaced by remove_permissions_from_user",
includedoc="Replaced by :func:`remove_permissions_from_user`",
since="1.4.0")
def removeRolesFromUser(self, username, roles, user=None):
if user is None:
user = self.find_user(username)
if user is None:
raise UnknownUser(username)
if "admin" in roles:
self.remove_groups_from_user(username, self._group_manager.admin_group)
self.remove_permissions_from_user(username, Permissions.ADMIN)
if "user" in roles:
self.remove_groups_from_user(username, self._group_manager.user_group)
checkPassword = deprecated("checkPassword has been renamed to check_password",
includedoc="Replaced by :func:`check_password`",
since="1.4.0")(check_password)
addUser = deprecated("addUser has been renamed to add_user",
includedoc="Replaced by :func:`add_user`",
since="1.4.0")(add_user)
changeUserActivation = deprecated("changeUserActivation has been renamed to change_user_activation",
includedoc="Replaced by :func:`change_user_activation`",
since="1.4.0")(change_user_activation)
changeUserPassword = deprecated("changeUserPassword has been renamed to change_user_password",
includedoc="Replaced by :func:`change_user_password`",
since="1.4.0")(change_user_password)
getUserSetting = deprecated("getUserSetting has been renamed to get_user_setting",
includedoc="Replaced by :func:`get_user_setting`",
since="1.4.0")(get_user_setting)
getAllUserSettings = deprecated("getAllUserSettings has been renamed to get_all_user_settings",
includedoc="Replaced by :func:`get_all_user_settings`",
since="1.4.0")(get_all_user_settings)
changeUserSetting = deprecated("changeUserSetting has been renamed to change_user_setting",
includedoc="Replaced by :func:`change_user_setting`",
since="1.4.0")(change_user_setting)
changeUserSettings = deprecated("changeUserSettings has been renamed to change_user_settings",
includedoc="Replaced by :func:`change_user_settings`",
since="1.4.0")(change_user_settings)
removeUser = deprecated("removeUser has been renamed to remove_user",
includedoc="Replaced by :func:`remove_user`",
since="1.4.0")(remove_user)
findUser = deprecated("findUser has been renamed to find_user",
includedoc="Replaced by :func:`find_user`",
since="1.4.0")(find_user)
getAllUsers = deprecated("getAllUsers has been renamed to get_all_users",
includedoc="Replaced by :func:`get_all_users`",
since="1.4.0")(get_all_users)
hasBeenCustomized = deprecated("hasBeenCustomized has been renamed to has_been_customized",
includedoc="Replaced by :func:`has_been_customized`",
since="1.4.0")(has_been_customized)
class LoginStatusListener(object):
def on_user_logged_in(self, user):
pass
def on_user_logged_out(self, user, stale=False):
pass
def on_user_modified(self, user):
pass
def on_user_removed(self, userid):
pass
##~~ FilebasedUserManager, takes available users from users.yaml file
class FilebasedUserManager(UserManager):
def __init__(self, group_manager, path=None, settings=None):
UserManager.__init__(self, group_manager, settings=settings)
if path is None:
path = self._settings.get(["accessControl", "userfile"])
if path is None:
path = os.path.join(s().getBaseFolder("base"), "users.yaml")
self._userfile = path
self._users = {}
self._dirty = False
self._customized = None
self._load()
def _load(self):
if os.path.exists(self._userfile) and os.path.isfile(self._userfile):
self._customized = True
with io.open(self._userfile, 'rt', encoding='utf-8') as f:
data = yaml.safe_load(f)
for name, attributes in data.items():
permissions = []
if "permissions" in attributes:
permissions = attributes["permissions"]
groups = {self._group_manager.user_group} # the user group is mandatory for all logged in users
if "groups" in attributes:
groups |= set(attributes["groups"])
# migrate from roles to permissions
if "roles" in attributes and not "permissions" in attributes:
self._logger.info("Migrating user {} to new granular permission system".format(name))
groups |= set(self._migrate_roles_to_groups(attributes["roles"]))
self._dirty = True
apikey = None
if "apikey" in attributes:
apikey = attributes["apikey"]
settings = dict()
if "settings" in attributes:
settings = attributes["settings"]
self._users[name] = User(username=name,
passwordHash=attributes["password"],
active=attributes["active"],
permissions=self._to_permissions(*permissions),
groups=self._to_groups(*groups),
apikey=apikey,
settings=settings)
for sessionid in self._sessionids_by_userid.get(name, set()):
if sessionid in self._session_users_by_session:
self._session_users_by_session[sessionid].update_user(self._users[name])
if self._dirty:
self._save()
else:
self._customized = False
def _save(self, force=False):
if not self._dirty and not force:
return
data = {}
for name, user in self._users.items():
if not user or not isinstance(user, User):
continue
data[name] = {
"password": user._passwordHash,
"active": user._active,
"groups": self._from_groups(*user._groups),
"permissions": self._from_permissions(*user._permissions),
"apikey": user._apikey,
"settings": user._settings,
# TODO: deprecated, remove in 1.5.0
"roles": user._roles
}
with atomic_write(self._userfile, mode='wt', permissions=0o600, max_permissions=0o666) as f:
yaml.safe_dump(data, f, default_flow_style=False, indent=4, allow_unicode=True)
self._dirty = False
self._load()
def _migrate_roles_to_groups(self, roles):
# If admin is inside the roles, just return admin group
if "admin" in roles:
return [self._group_manager.admin_group, self._group_manager.user_group]
else:
return [self._group_manager.user_group]
def _refresh_groups(self, user):
user._groups = self._to_groups(*map(lambda g: g.key, user.groups))
def add_user(self, username, password, active=False, permissions=None, groups=None, apikey=None, overwrite=False):
if not permissions:
permissions = []
permissions = self._to_permissions(*permissions)
if not groups:
groups = self._group_manager.default_groups
groups = self._to_groups(*groups)
if username in self._users and not overwrite:
raise UserAlreadyExists(username)
self._users[username] = User(username,
UserManager.create_password_hash(password,
settings=self._settings),
active,
permissions,
groups,
apikey=apikey)
self._dirty = True
self._save()
def change_user_activation(self, username, active):
if username not in self._users:
raise UnknownUser(username)
if self._users[username].is_active != active:
self._users[username]._active = active
self._dirty = True
self._save()
self._trigger_on_user_modified(username)
def change_user_permissions(self, username, permissions):
if username not in self._users:
raise UnknownUser(username)
user = self._users[username]
permissions = self._to_permissions(*permissions)
removed_permissions = list(set(user._permissions) - set(permissions))
added_permissions = list(set(permissions) - set(user._permissions))
if len(removed_permissions) > 0:
user.remove_permissions_from_user(removed_permissions)
self._dirty = True
if len(added_permissions) > 0:
user.add_permissions_to_user(added_permissions)
self._dirty = True
if self._dirty:
self._save()
self._trigger_on_user_modified(username)
def add_permissions_to_user(self, username, permissions):
if username not in self._users:
raise UnknownUser(username)
if self._users[username].add_permissions_to_user(self._to_permissions(*permissions)):
self._dirty = True
self._save()
self._trigger_on_user_modified(username)
def remove_permissions_from_user(self, username, permissions):
if username not in self._users:
raise UnknownUser(username)
if self._users[username].remove_permissions_from_user(self._to_permissions(*permissions)):
self._dirty = True
self._save()
self._trigger_on_user_modified(username)
def remove_permissions_from_users(self, permissions):
modified = []
        for username, user in self._users.items():
            dirty = user.remove_permissions_from_user(self._to_permissions(*permissions))
            if dirty:
                self._dirty = True
                modified.append(username)
if self._dirty:
self._save()
for username in modified:
self._trigger_on_user_modified(username)
def change_user_groups(self, username, groups):
if username not in self._users:
raise UnknownUser(username)
user = self._users[username]
groups = self._to_groups(*groups)
removed_groups = list(set(user._groups) - set(groups))
added_groups = list(set(groups) - set(user._groups))
if len(removed_groups):
self._dirty |= user.remove_groups_from_user(removed_groups)
if len(added_groups):
self._dirty |= user.add_groups_to_user(added_groups)
if self._dirty:
self._save()
self._trigger_on_user_modified(username)
def add_groups_to_user(self, username, groups, save=True, notify=True):
if username not in self._users:
raise UnknownUser(username)
if self._users[username].add_groups_to_user(self._to_groups(*groups)):
self._dirty = True
if save:
self._save()
if notify:
self._trigger_on_user_modified(username)
def remove_groups_from_user(self, username, groups, save=True, notify=True):
if username not in self._users:
raise UnknownUser(username)
if self._users[username].remove_groups_from_user(self._to_groups(*groups)):
self._dirty = True
if save:
self._save()
if notify:
self._trigger_on_user_modified(username)
def remove_groups_from_users(self, groups):
modified = []
for username, user in self._users.items():
dirty = user.remove_groups_from_user(self._to_groups(*groups))
if dirty:
self._dirty = True
modified.append(username)
if self._dirty:
self._save()
for username in modified:
self._trigger_on_user_modified(username)
def change_user_password(self, username, password):
if not username in self._users:
raise UnknownUser(username)
passwordHash = UserManager.create_password_hash(password,
settings=self._settings)
user = self._users[username]
if user._passwordHash != passwordHash:
user._passwordHash = passwordHash
self._dirty = True
self._save()
def change_user_setting(self, username, key, value):
if not username in self._users:
raise UnknownUser(username)
user = self._users[username]
old_value = user.get_setting(key)
if not old_value or old_value != value:
user.set_setting(key, value)
self._dirty = self._dirty or old_value != value
self._save()
def change_user_settings(self, username, new_settings):
if not username in self._users:
raise UnknownUser(username)
user = self._users[username]
for key, value in new_settings.items():
old_value = user.get_setting(key)
user.set_setting(key, value)
self._dirty = self._dirty or old_value != value
self._save()
def get_all_user_settings(self, username):
if not username in self._users:
raise UnknownUser(username)
user = self._users[username]
return user.get_all_settings()
def get_user_setting(self, username, key):
if not username in self._users:
raise UnknownUser(username)
user = self._users[username]
return user.get_setting(key)
def generate_api_key(self, username):
if not username in self._users:
raise UnknownUser(username)
user = self._users[username]
user._apikey = generate_api_key()
self._dirty = True
self._save()
return user._apikey
def delete_api_key(self, username):
if not username in self._users:
raise UnknownUser(username)
user = self._users[username]
user._apikey = None
self._dirty = True
self._save()
def remove_user(self, username):
UserManager.remove_user(self, username)
if not username in self._users:
raise UnknownUser(username)
del self._users[username]
self._dirty = True
self._save()
def find_user(self, userid=None, apikey=None, session=None):
user = UserManager.find_user(self, userid=userid, session=session)
if user is not None:
return user
if userid is not None:
if userid not in self._users:
return None
return self._users[userid]
elif apikey is not None:
for user in self._users.values():
if apikey == user._apikey:
return user
return None
else:
return None
def get_all_users(self):
return list(self._users.values())
def has_been_customized(self):
return self._customized
def on_group_permissions_changed(self, group, added=None, removed=None):
# refresh our group references
for user in self.get_all_users():
if group in user.groups:
self._refresh_groups(user)
# call parent
UserManager.on_group_permissions_changed(self, group, added=added, removed=removed)
def on_group_subgroups_changed(self, group, added=None, removed=None):
# refresh our group references
for user in self.get_all_users():
if group in user.groups:
self._refresh_groups(user)
# call parent
UserManager.on_group_subgroups_changed(self, group, added=added, removed=removed)
#~~ Helpers
def _to_groups(self, *groups):
return list(set(filter(lambda x: x is not None,
(self._group_manager._to_group(group) for group in groups))))
def _to_permissions(self, *permissions):
return list(set(filter(lambda x: x is not None,
(Permissions.find(permission) for permission in permissions))))
def _from_groups(self, *groups):
return list(set(group.key for group in groups))
def _from_permissions(self, *permissions):
return list(set(permission.key for permission in permissions))
# ~~ Deprecated methods follow
# TODO: Remove deprecated methods in OctoPrint 1.5.0
generateApiKey = deprecated("generateApiKey has been renamed to generate_api_key",
includedoc="Replaced by :func:`generate_api_key`",
since="1.4.0")(generate_api_key)
deleteApiKey = deprecated("deleteApiKey has been renamed to delete_api_key",
includedoc="Replaced by :func:`delete_api_key`",
since="1.4.0")(delete_api_key)
addUser = deprecated("addUser has been renamed to add_user",
includedoc="Replaced by :func:`add_user`",
since="1.4.0")(add_user)
changeUserActivation = deprecated("changeUserActivation has been renamed to change_user_activation",
includedoc="Replaced by :func:`change_user_activation`",
since="1.4.0")(change_user_activation)
changeUserPassword = deprecated("changeUserPassword has been renamed to change_user_password",
includedoc="Replaced by :func:`change_user_password`",
since="1.4.0")(change_user_password)
getUserSetting = deprecated("getUserSetting has been renamed to get_user_setting",
includedoc="Replaced by :func:`get_user_setting`",
since="1.4.0")(get_user_setting)
getAllUserSettings = deprecated("getAllUserSettings has been renamed to get_all_user_settings",
includedoc="Replaced by :func:`get_all_user_settings`",
since="1.4.0")(get_all_user_settings)
changeUserSetting = deprecated("changeUserSetting has been renamed to change_user_setting",
includedoc="Replaced by :func:`change_user_setting`",
since="1.4.0")(change_user_setting)
changeUserSettings = deprecated("changeUserSettings has been renamed to change_user_settings",
includedoc="Replaced by :func:`change_user_settings`",
since="1.4.0")(change_user_settings)
removeUser = deprecated("removeUser has been renamed to remove_user",
includedoc="Replaced by :func:`remove_user`",
since="1.4.0")(remove_user)
findUser = deprecated("findUser has been renamed to find_user",
includedoc="Replaced by :func:`find_user`",
since="1.4.0")(find_user)
getAllUsers = deprecated("getAllUsers has been renamed to get_all_users",
includedoc="Replaced by :func:`get_all_users`",
since="1.4.0")(get_all_users)
hasBeenCustomized = deprecated("hasBeenCustomized has been renamed to has_been_customized",
includedoc="Replaced by :func:`has_been_customized`",
since="1.4.0")(has_been_customized)
##~~ Exceptions
class UserAlreadyExists(Exception):
def __init__(self, username):
Exception.__init__(self, "User %s already exists" % username)
class UnknownUser(Exception):
def __init__(self, username):
Exception.__init__(self, "Unknown user: %s" % username)
class UnknownRole(Exception):
    def __init__(self, role):
Exception.__init__(self, "Unknown role: %s" % role)
##~~ Refactoring helpers
class MethodReplacedByBooleanProperty(object):
def __init__(self, name, message, getter):
self._name = name
self._message = message
self._getter = getter
@property
def _attr(self):
return self._getter()
def __call__(self):
from warnings import warn
warn(DeprecationWarning(self._message.format(name=self._name)), stacklevel=2)
return self._attr
def __eq__(self, other):
return self._attr == other
def __ne__(self, other):
return self._attr != other
def __bool__(self):
# Python 3
return self._attr
def __nonzero__(self):
# Python 2
return self._attr
def __hash__(self):
return hash(self._attr)
def __repr__(self):
return "MethodReplacedByProperty({}, {}, {})".format(self._name, self._message, self._getter)
def __str__(self):
return str(self._attr)
# TODO: Remove compatibility layer in OctoPrint 1.5.0
class FlaskLoginMethodReplacedByBooleanProperty(MethodReplacedByBooleanProperty):
def __init__(self, name, getter):
message = "{name} is now a property in Flask-Login versions >= 0.3.0, which OctoPrint now uses. " + \
"Use {name} instead of {name}(). This compatibility layer will be removed in OctoPrint 1.5.0."
MethodReplacedByBooleanProperty.__init__(self, name, message, getter)
# TODO: Remove compatibility layer in OctoPrint 1.5.0
class OctoPrintUserMethodReplacedByBooleanProperty(MethodReplacedByBooleanProperty):
def __init__(self, name, getter):
message = "{name} is now a property for consistency reasons with Flask-Login versions >= 0.3.0, which " + \
"OctoPrint now uses. Use {name} instead of {name}(). This compatibility layer will be removed " + \
"in OctoPrint 1.5.0."
MethodReplacedByBooleanProperty.__init__(self, name, message, getter)
##~~ User object
class User(UserMixin):
def __init__(self, username, passwordHash, active, permissions=None, groups=None, apikey=None, settings=None):
if permissions is None:
permissions = []
if groups is None:
groups = []
self._username = username
self._passwordHash = passwordHash
self._active = active
self._permissions = permissions
self._groups = groups
self._apikey = apikey
if settings is None:
settings = dict()
self._settings = settings
def as_dict(self):
from octoprint.access.permissions import OctoPrintPermission
return {
"name": self._username,
"active": bool(self.is_active),
"permissions": list(map(lambda p: p.key, self._permissions)),
"groups": list(map(lambda g: g.key, self._groups)),
"needs": OctoPrintPermission.convert_needs_to_dict(self.needs),
"apikey": self._apikey,
"settings": self._settings,
# TODO: deprecated, remove in 1.5.0
"admin": self.has_permission(Permissions.ADMIN),
"user": not self.is_anonymous,
"roles": self._roles
}
def check_password(self, passwordHash):
return self._passwordHash == passwordHash
def get_id(self):
return self.get_name()
def get_name(self):
return self._username
@property
def is_anonymous(self):
return FlaskLoginMethodReplacedByBooleanProperty("is_anonymous", lambda: False)
@property
def is_authenticated(self):
return FlaskLoginMethodReplacedByBooleanProperty("is_authenticated", lambda: True)
@property
def is_active(self):
return FlaskLoginMethodReplacedByBooleanProperty("is_active", lambda: self._active)
def get_all_settings(self):
return self._settings
def get_setting(self, key):
if not isinstance(key, (tuple, list)):
path = [key]
else:
path = key
return self._get_setting(path)
def set_setting(self, key, value):
if not isinstance(key, (tuple, list)):
path = [key]
else:
path = key
return self._set_setting(path, value)
def _get_setting(self, path):
s = self._settings
for p in path:
if isinstance(s, dict) and p in s:
s = s[p]
else:
return None
return s
def _set_setting(self, path, value):
s = self._settings
for p in path[:-1]:
if p not in s:
s[p] = dict()
if not isinstance(s[p], dict):
s[p] = dict()
s = s[p]
key = path[-1]
s[key] = value
return True
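    # --- Illustrative editor's note (not part of OctoPrint) ---
    # get_setting/set_setting accept either a single key or a key path (list/tuple) and
    # walk or create the nested settings dicts accordingly. A hedged usage sketch (the
    # "interface"/"color" keys are just example values):
    #
    #   user.set_setting(["interface", "color"], "dark")
    #   user.get_setting(["interface", "color"])   # -> "dark"
    #   user.get_setting("interface")              # -> {"color": "dark"}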
def add_permissions_to_user(self, permissions):
# Make sure the permissions variable is of type list
if not isinstance(permissions, list):
permissions = [permissions]
assert(all(map(lambda p: isinstance(p, OctoPrintPermission), permissions)))
dirty = False
for permission in permissions:
            if permission not in self._permissions:
self._permissions.append(permission)
dirty = True
return dirty
def remove_permissions_from_user(self, permissions):
# Make sure the permissions variable is of type list
if not isinstance(permissions, list):
permissions = [permissions]
assert(all(map(lambda p: isinstance(p, OctoPrintPermission), permissions)))
dirty = False
for permission in permissions:
if permission in self._permissions:
self._permissions.remove(permission)
dirty = True
return dirty
def add_groups_to_user(self, groups):
# Make sure the groups variable is of type list
if not isinstance(groups, list):
groups = [groups]
assert(all(map(lambda p: isinstance(p, Group), groups)))
dirty = False
for group in groups:
if group.is_toggleable() and group not in self._groups:
self._groups.append(group)
dirty = True
return dirty
def remove_groups_from_user(self, groups):
# Make sure the groups variable is of type list
if not isinstance(groups, list):
groups = [groups]
assert(all(map(lambda p: isinstance(p, Group), groups)))
dirty = False
for group in groups:
if group.is_toggleable() and group in self._groups:
self._groups.remove(group)
dirty = True
return dirty
@property
def permissions(self):
if self._permissions is None:
return []
if Permissions.ADMIN in self._permissions:
return Permissions.all()
return list(filter(lambda p: p is not None, self._permissions))
@property
def groups(self):
return list(self._groups)
@property
def effective_permissions(self):
if self._permissions is None:
return []
return list(filter(lambda p: p is not None and self.has_permission(p), Permissions.all()))
@property
def needs(self):
needs = set()
for permission in self.permissions:
if permission is not None:
needs = needs.union(permission.needs)
for group in self.groups:
if group is not None:
needs = needs.union(group.needs)
return needs
def has_permission(self, permission):
return self.has_needs(*permission.needs)
def has_needs(self, *needs):
return set(needs).issubset(self.needs)
def __repr__(self):
return "User(id=%s,name=%s,active=%r,user=True,admin=%r,permissions=%s,groups=%s)" % (self.get_id(), self.get_name(), bool(self.is_active), self.has_permission(Permissions.ADMIN), self._permissions, self._groups)
# ~~ Deprecated methods & properties follow
# TODO: Remove deprecated methods & properties in OctoPrint 1.5.0
asDict = deprecated("asDict has been renamed to as_dict",
includedoc="Replaced by :func:`as_dict`",
since="1.4.0")(as_dict)
@property
@deprecated("is_user is deprecated, please use has_permission", since="1.4.0")
def is_user(self):
return OctoPrintUserMethodReplacedByBooleanProperty("is_user", lambda: not self.is_anonymous)
@property
@deprecated("is_admin is deprecated, please use has_permission", since="1.4.0")
def is_admin(self):
return OctoPrintUserMethodReplacedByBooleanProperty("is_admin", lambda: self.has_permission(Permissions.ADMIN))
@property
@deprecated("roles is deprecated, please use has_permission", since="1.4.0")
def roles(self):
return self._roles
@property
def _roles(self):
"""Helper for the deprecated self.roles and serializing to yaml"""
if self.has_permission(Permissions.ADMIN):
return ["user", "admin"]
elif not self.is_anonymous:
return ["user"]
else:
return []
class AnonymousUser(AnonymousUserMixin, User):
def __init__(self, groups):
User.__init__(self, None, "", True, [], groups)
@property
def is_anonymous(self):
return FlaskLoginMethodReplacedByBooleanProperty("is_anonymous", lambda: True)
@property
def is_authenticated(self):
return FlaskLoginMethodReplacedByBooleanProperty("is_authenticated", lambda: False)
@property
def is_active(self):
return FlaskLoginMethodReplacedByBooleanProperty("is_active", lambda: self._active)
def check_password(self, passwordHash):
return True
def as_dict(self):
from octoprint.access.permissions import OctoPrintPermission
return {
"needs": OctoPrintPermission.convert_needs_to_dict(self.needs)
}
def __repr__(self):
return "AnonymousUser(groups=%s)" % self._groups
class SessionUser(wrapt.ObjectProxy):
def __init__(self, user):
wrapt.ObjectProxy.__init__(self, user)
self._self_session = "".join('%02X' % z for z in bytes(uuid.uuid4().bytes))
self._self_created = monotonic_time()
self._self_touched = monotonic_time()
@property
def session(self):
return self._self_session
@property
def created(self):
return self._self_created
@property
def touched(self):
return self._self_touched
def touch(self):
self._self_touched = monotonic_time()
@deprecated("SessionUser.get_session() has been deprecated, use SessionUser.session instead", since="1.3.5")
def get_session(self):
return self.session
def update_user(self, user):
self.__wrapped__ = user
def as_dict(self):
result = self.__wrapped__.as_dict()
result.update(dict(session=self.session))
return result
def __repr__(self):
return "SessionUser({!r},session={},created={})".format(self.__wrapped__, self.session, self.created)
##~~ User object to use when global api key is used to access the API
class ApiUser(User):
def __init__(self, groups):
User.__init__(self, "_api", "", True, [], groups)
##~~ User object to use when access control is disabled
class AdminUser(User):
def __init__(self, groups):
User.__init__(self, "_admin", "", True, [], groups)
| agpl-3.0 | -4,884,698,765,578,790,000 | 30.650943 | 214 | 0.663214 | false |
QuantEcon/QuantEcon.py | quantecon/quad.py | 1 | 31180 | """
Defining various quadrature routines.
Based on the quadrature routines found in the CompEcon toolbox by
Miranda and Fackler.
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
and Finance, MIT Press, 2002.
"""
import math
import numpy as np
import scipy.linalg as la
from numba import jit, vectorize
from .ce_util import ckron, gridmake
from .util import check_random_state
__all__ = ['qnwcheb', 'qnwequi', 'qnwlege', 'qnwnorm', 'qnwlogn',
'qnwsimp', 'qnwtrap', 'qnwunif', 'quadrect', 'qnwbeta',
'qnwgamma']
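# Illustrative usage (not part of the original module; the numbers below are
# example values only): approximate the integral of x**2 over [0, 1] with an
# 11-node Gauss-Legendre rule.
#
#     nodes, weights = qnwlege(11, 0.0, 1.0)
#     approx = weights @ nodes**2                  # ~ 1/3
#
# quadrect wraps the same node/weight machinery for a callable:
#
#     approx = quadrect(lambda x: x**2, 11, 0.0, 1.0, kind='lege')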
@vectorize(nopython=True)
def gammaln(x):
return math.lgamma(x)
@vectorize(nopython=True)
def fix(x):
if x < 0:
return math.ceil(x)
else:
return math.floor(x)
# ------------------ #
# Exported Functions #
# ------------------ #
def qnwcheb(n, a=1, b=1):
"""
    Computes multivariate Gauss-Chebyshev quadrature nodes and weights.
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwcheb`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
return _make_multidim_func(_qnwcheb1, n, a, b)
def qnwequi(n, a, b, kind="N", equidist_pp=None, random_state=None):
"""
    Generates equidistributed sequences with the property that the average
    value of an integrable function evaluated over the sequence converges
    to the integral as n goes to infinity.
Parameters
----------
n : int
Number of sequence points
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
kind : string, optional(default="N")
One of the following:
        - N - Niederreiter (default)
- W - Weyl
- H - Haber
- R - pseudo Random
equidist_pp : array_like, optional(default=None)
TODO: I don't know what this does
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwequi`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
random_state = check_random_state(random_state)
if equidist_pp is None:
import sympy as sym
equidist_pp = np.sqrt(np.array(list(sym.primerange(0, 7920))))
n, a, b = list(map(np.atleast_1d, list(map(np.asarray, [n, a, b]))))
d = max(list(map(len, [n, a, b])))
n = np.prod(n)
if a.size == 1:
a = np.repeat(a, d)
if b.size == 1:
b = np.repeat(b, d)
i = np.arange(1, n + 1)
    if kind.upper() == "N":  # Niederreiter
j = 2.0 ** (np.arange(1, d+1) / (d+1))
nodes = np.outer(i, j)
nodes = (nodes - fix(nodes)).squeeze()
elif kind.upper() == "W": # Weyl
j = equidist_pp[:d]
nodes = np.outer(i, j)
nodes = (nodes - fix(nodes)).squeeze()
elif kind.upper() == "H": # Haber
j = equidist_pp[:d]
nodes = np.outer(i * (i+1) / 2, j)
nodes = (nodes - fix(nodes)).squeeze()
elif kind.upper() == "R": # pseudo-random
nodes = random_state.rand(n, d).squeeze()
else:
raise ValueError("Unknown sequence requested")
# compute nodes and weights
r = b - a
nodes = a + nodes * r
weights = (np.prod(r) / n) * np.ones(n)
return nodes, weights
def qnwlege(n, a, b):
"""
    Computes multivariate Gauss-Legendre quadrature nodes and weights.
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwlege`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
return _make_multidim_func(_qnwlege1, n, a, b)
def qnwnorm(n, mu=None, sig2=None, usesqrtm=False):
"""
Computes nodes and weights for multivariate normal distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
mu : scalar or array_like(float), optional(default=zeros(d))
The means of each dimension of the random variable. If a scalar
is given, that constant is repeated d times, where d is the
number of dimensions
sig2 : array_like(float), optional(default=eye(d))
A d x d array representing the variance-covariance matrix of the
multivariate normal distribution.
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwnorm`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
n = np.atleast_1d(n)
d = n.size
if mu is None:
mu = np.zeros(d)
else:
mu = np.atleast_1d(mu)
if sig2 is None:
sig2 = np.eye(d)
else:
sig2 = np.atleast_1d(sig2).reshape(d, d)
if all([x.size == 1 for x in [n, mu, sig2]]):
nodes, weights = _qnwnorm1(n[0])
else:
nodes = []
weights = []
for i in range(d):
_1d = _qnwnorm1(n[i])
nodes.append(_1d[0])
weights.append(_1d[1])
nodes = gridmake(*nodes)
weights = ckron(*weights[::-1])
if usesqrtm:
new_sig2 = la.sqrtm(sig2)
else: # cholesky
new_sig2 = la.cholesky(sig2)
if d > 1:
nodes = nodes.dot(new_sig2) + mu # Broadcast ok
else: # nodes.dot(sig) will not be aligned in scalar case.
nodes = nodes * new_sig2 + mu
return nodes.squeeze(), weights
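# Illustrative usage of qnwnorm (example values only): discretize a scalar
# N(mu=1, sigma^2=0.25) variable and approximate E[exp(X)].
#
#     nodes, weights = qnwnorm(9, mu=1.0, sig2=0.25)
#     expectation = weights @ np.exp(nodes)     # ~ exp(mu + sig2 / 2)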
def qnwlogn(n, mu=None, sig2=None):
"""
Computes nodes and weights for multivariate lognormal distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
mu : scalar or array_like(float), optional(default=zeros(d))
The means of each dimension of the random variable. If a scalar
is given, that constant is repeated d times, where d is the
number of dimensions
sig2 : array_like(float), optional(default=eye(d))
A d x d array representing the variance-covariance matrix of the
multivariate normal distribution.
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwlogn`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
nodes, weights = qnwnorm(n, mu, sig2)
return np.exp(nodes), weights
def qnwsimp(n, a, b):
"""
Computes multivariate Simpson quadrature nodes and weights.
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwsimp`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
return _make_multidim_func(_qnwsimp1, n, a, b)
def qnwtrap(n, a, b):
"""
Computes multivariate trapezoid rule quadrature nodes and weights.
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwtrap`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
return _make_multidim_func(_qnwtrap1, n, a, b)
def qnwunif(n, a, b):
"""
Computes quadrature nodes and weights for multivariate uniform
distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwunif`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
n, a, b = list(map(np.asarray, [n, a, b]))
nodes, weights = qnwlege(n, a, b)
weights = weights / np.prod(b - a)
return nodes, weights
def quadrect(f, n, a, b, kind='lege', *args, **kwargs):
"""
Integrate the d-dimensional function f on a rectangle with lower and
upper bound for dimension i defined by a[i] and b[i], respectively;
using n[i] points.
Parameters
----------
f : function
The function to integrate over. This should be a function
that accepts as its first argument a matrix representing points
along each dimension (each dimension is a column). Other
arguments that need to be passed to the function are caught by
`*args` and `**kwargs`
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
kind : string, optional(default='lege')
Specifies which type of integration to perform. Valid
values are:
lege - Gauss-Legendre
cheb - Gauss-Chebyshev
trap - trapezoid rule
simp - Simpson rule
            N - Niederreiter equidistributed sequence
W - Weyl equidistributed sequence
H - Haber equidistributed sequence
R - Monte Carlo
*args, **kwargs :
Other arguments passed to the function f
Returns
-------
out : scalar (float)
The value of the integral on the region [a, b]
Notes
-----
    Based on original function ``quadrect`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
if kind.lower() == "lege":
nodes, weights = qnwlege(n, a, b)
elif kind.lower() == "cheb":
nodes, weights = qnwcheb(n, a, b)
elif kind.lower() == "trap":
nodes, weights = qnwtrap(n, a, b)
elif kind.lower() == "simp":
nodes, weights = qnwsimp(n, a, b)
else:
nodes, weights = qnwequi(n, a, b, kind)
out = weights.dot(f(nodes, *args, **kwargs))
return out
def qnwbeta(n, a=1.0, b=1.0):
"""
Computes nodes and weights for beta distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
    a : scalar or array_like(float), optional(default=1.0)
        A length-d iterable of first shape parameters of the beta
        distribution. If a scalar is given, it is repeated d times
    b : scalar or array_like(float), optional(default=1.0)
        A length-d iterable of second shape parameters of the beta
        distribution. If a scalar is given, it is repeated d times
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwbeta`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
return _make_multidim_func(_qnwbeta1, n, a, b)
def qnwgamma(n, a=1.0, b=1.0, tol=3e-14):
"""
Computes nodes and weights for gamma distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float) : optional(default=ones(d))
Shape parameter of the gamma distribution parameter. Must be positive
b : scalar or array_like(float) : optional(default=ones(d))
Scale parameter of the gamma distribution parameter. Must be positive
tol : scalar or array_like(float) : optional(default=ones(d) * 3e-14)
Tolerance parameter for newton iterations for each node
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on original function ``qnwgamma`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
return _make_multidim_func(_qnwgamma1, n, a, b, tol)
# ------------------ #
# Internal Functions #
# ------------------ #
def _make_multidim_func(one_d_func, n, *args):
"""
A helper function to cut down on code repetition. Almost all of the
    code in qnwcheb, qnwlege, qnwsimp, qnwtrap is just dealing with
various forms of input arguments and then shelling out to the
corresponding 1d version of the function.
This routine does all the argument checking and passes things
through the appropriate 1d function before using a tensor product
to combine weights and nodes.
Parameters
----------
one_d_func : function
The 1d function to be called along each dimension
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
args :
These are the arguments to various qnw____ functions. For the
majority of the functions this is just a and b, but some differ.
Returns
-------
    nodes : np.ndarray(dtype=float)
        Quadrature nodes
    weights : np.ndarray(dtype=float)
        Weights for quadrature nodes
"""
_args = list(args)
n = np.atleast_1d(n)
args = list(map(np.atleast_1d, _args))
if all([x.size == 1 for x in [n] + args]):
return one_d_func(n[0], *_args)
d = n.size
for i in range(len(args)):
if args[i].size == 1:
args[i] = np.repeat(args[i], d)
nodes = []
weights = []
for i in range(d):
ai = [x[i] for x in args]
_1d = one_d_func(n[i], *ai)
nodes.append(_1d[0])
weights.append(_1d[1])
weights = ckron(*weights[::-1]) # reverse ordered tensor product
nodes = gridmake(*nodes)
return nodes, weights
@jit(nopython=True)
def _qnwcheb1(n, a, b):
"""
    Compute univariate Gauss-Chebyshev quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
    Based on original function ``qnwcheb1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
nodes = (b+a)/2 - (b-a)/2 * np.cos(np.pi/n * np.linspace(0.5, n-0.5, n))
# Create temporary arrays to be used in computing weights
t1 = np.arange(1, n+1) - 0.5
t2 = np.arange(0.0, n, 2)
t3 = np.concatenate((np.array([1.0]),
-2.0/(np.arange(1.0, n-1, 2)*np.arange(3.0, n+1, 2))))
# compute weights and return
weights = ((b-a)/n)*np.cos(np.pi/n*np.outer(t1, t2)) @ t3
return nodes, weights
@jit(nopython=True)
def _qnwlege1(n, a, b):
"""
    Compute univariate Gauss-Legendre quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
    Based on original function ``qnwlege1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
# import ipdb; ipdb.set_trace()
maxit = 100
m = int(fix((n + 1) / 2.0))
xm = 0.5 * (b + a)
xl = 0.5 * (b - a)
nodes = np.zeros(n)
weights = nodes.copy()
i = np.arange(m)
z = np.cos(np.pi * ((i + 1.0) - 0.25) / (n + 0.5))
for its in range(maxit):
p1 = np.ones_like(z)
p2 = np.zeros_like(z)
for j in range(1, n+1):
p3 = p2
p2 = p1
p1 = ((2 * j - 1) * z * p2 - (j - 1) * p3) / j
# https://github.com/QuantEcon/QuantEcon.py/issues/530
top = n * (z * p1 - p2)
bottom = z ** 2 - 1.0
pp = top / bottom
z1 = z.copy()
z = z1 - p1/pp
if np.all(np.abs(z - z1) < 1e-14):
break
if its == maxit - 1:
raise ValueError("Maximum iterations in _qnwlege1")
nodes[i] = xm - xl * z
nodes[- i - 1] = xm + xl * z
# https://github.com/QuantEcon/QuantEcon.py/issues/530
weights[i] = 2 * xl / ((1 - z ** 2) * pp * pp)
weights[- i - 1] = weights[i]
return nodes, weights
@jit(nopython=True)
def _qnwnorm1(n):
"""
Compute nodes and weights for quadrature of univariate standard
normal distribution
Parameters
----------
n : int
The number of nodes
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
    Based on original function ``qnwnorm1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
maxit = 100
pim4 = 1 / np.pi**(0.25)
m = int(fix((n + 1) / 2))
nodes = np.zeros(n)
weights = np.zeros(n)
for i in range(m):
if i == 0:
z = np.sqrt(2*n+1) - 1.85575 * ((2 * n + 1)**(-1 / 6.1))
elif i == 1:
z = z - 1.14 * (n ** 0.426) / z
elif i == 2:
z = 1.86 * z + 0.86 * nodes[0]
elif i == 3:
z = 1.91 * z + 0.91 * nodes[1]
else:
z = 2 * z + nodes[i-2]
its = 0
while its < maxit:
its += 1
p1 = pim4
p2 = 0
for j in range(1, n+1):
p3 = p2
p2 = p1
p1 = z * math.sqrt(2.0/j) * p2 - math.sqrt((j - 1.0) / j) * p3
pp = math.sqrt(2 * n) * p2
z1 = z
z = z1 - p1/pp
if abs(z - z1) < 1e-14:
break
if its == maxit:
raise ValueError("Failed to converge in _qnwnorm1")
nodes[n - 1 - i] = z
nodes[i] = -z
weights[i] = 2 / (pp*pp)
weights[n - 1 - i] = weights[i]
weights /= math.sqrt(math.pi)
nodes = nodes * math.sqrt(2.0)
return nodes, weights
@jit(nopython=True)
def _qnwsimp1(n, a, b):
"""
Compute univariate Simpson quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
    Based on original function ``qnwsimp1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
if n % 2 == 0:
print("WARNING qnwsimp: n must be an odd integer. Increasing by 1")
n += 1
nodes = np.linspace(a, b, n)
dx = nodes[1] - nodes[0]
weights = np.kron(np.ones((n+1) // 2), np.array([2.0, 4.0]))
weights = weights[:n]
weights[0] = weights[-1] = 1
weights = (dx / 3.0) * weights
return nodes, weights
@jit(nopython=True)
def _qnwtrap1(n, a, b):
"""
Compute univariate trapezoid rule quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
    Based on original function ``qnwtrap1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
if n < 1:
raise ValueError("n must be at least one")
nodes = np.linspace(a, b, n)
dx = nodes[1] - nodes[0]
weights = dx * np.ones(n)
weights[0] *= 0.5
weights[-1] *= 0.5
return nodes, weights
@jit(nopython=True)
def _qnwbeta1(n, a=1.0, b=1.0):
"""
Computes nodes and weights for quadrature on the beta distribution.
Default is a=b=1 which is just a uniform distribution
NOTE: For now I am just following compecon; would be much better to
find a different way since I don't know what they are doing.
Parameters
----------
n : scalar : int
The number of quadrature points
a : scalar : float, optional(default=1)
First Beta distribution parameter
b : scalar : float, optional(default=1)
Second Beta distribution parameter
Returns
-------
nodes : np.ndarray(dtype=float, ndim=1)
The quadrature points
weights : np.ndarray(dtype=float, ndim=1)
The quadrature weights that correspond to nodes
Notes
-----
    Based on original function ``_qnwbeta1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
# We subtract one and write a + 1 where we actually want a, and a
# where we want a - 1
a = a - 1
b = b - 1
maxiter = 25
# Allocate empty space
nodes = np.zeros(n)
weights = np.zeros(n)
# Find "reasonable" starting values. Why these numbers?
for i in range(n):
if i == 0:
an = a/n
bn = b/n
r1 = (1+a) * (2.78/(4+n*n) + .768*an/n)
r2 = 1 + 1.48*an + .96*bn + .452*an*an + .83*an*bn
z = 1 - r1/r2
elif i == 1:
r1 = (4.1+a) / ((1+a)*(1+0.156*a))
r2 = 1 + 0.06 * (n-8) * (1+0.12*a)/n
r3 = 1 + 0.012*b * (1+0.25*abs(a))/n
z = z - (1-z) * r1 * r2 * r3
elif i == 2:
r1 = (1.67+0.28*a)/(1+0.37*a)
r2 = 1+0.22*(n-8)/n
r3 = 1+8*b/((6.28+b)*n*n)
z = z-(nodes[0]-z)*r1*r2*r3
elif i == n - 2:
r1 = (1+0.235*b)/(0.766+0.119*b)
r2 = 1/(1+0.639*(n-4)/(1+0.71*(n-4)))
r3 = 1/(1+20*a/((7.5+a)*n*n))
z = z+(z-nodes[-4])*r1*r2*r3
elif i == n - 1:
r1 = (1+0.37*b) / (1.67+0.28*b)
r2 = 1 / (1+0.22*(n-8)/n)
r3 = 1 / (1+8*a/((6.28+a)*n*n))
z = z+(z-nodes[-3])*r1*r2*r3
else:
z = 3*nodes[i-1] - 3*nodes[i-2] + nodes[i-3]
ab = a+b
# Root finding
its = 0
z1 = -100
while abs(z - z1) > 1e-10 and its < maxiter:
temp = 2 + ab
p1 = (a-b + temp*z)/2
p2 = 1
for j in range(2, n+1):
p3 = p2
p2 = p1
temp = 2*j + ab
aa = 2*j * (j+ab)*(temp-2)
bb = (temp-1) * (a*a - b*b + temp*(temp-2) * z)
c = 2 * (j - 1 + a) * (j - 1 + b) * temp
p1 = (bb*p2 - c*p3)/aa
pp = (n*(a-b-temp*z) * p1 + 2*(n+a)*(n+b)*p2)/(temp*(1 - z*z))
z1 = z
z = z1 - p1/pp
if abs(z - z1) < 1e-12:
break
its += 1
if its == maxiter:
raise ValueError("Max Iteration reached. Failed to converge")
nodes[i] = z
weights[i] = temp/(pp*p2)
nodes = (1-nodes)/2
weights = weights * math.exp(gammaln(a+n) + gammaln(b+n) -
gammaln(n+1) - gammaln(n+ab+1))
weights = weights / (2*math.exp(gammaln(a+1) + gammaln(b+1) -
gammaln(ab+2)))
return nodes, weights
@jit(nopython=True)
def _qnwgamma1(n, a=1.0, b=1.0, tol=3e-14):
"""
1d quadrature weights and nodes for Gamma distributed random variable
Parameters
----------
n : scalar : int
The number of quadrature points
a : scalar : float, optional(default=1.0)
Shape parameter of the gamma distribution parameter. Must be positive
b : scalar : float, optional(default=1.0)
Scale parameter of the gamma distribution parameter. Must be positive
tol : scalar : float, optional(default=3e-14)
Tolerance parameter for newton iterations for each node
Returns
-------
nodes : np.ndarray(dtype=float, ndim=1)
The quadrature points
weights : np.ndarray(dtype=float, ndim=1)
The quadrature weights that correspond to nodes
Notes
-----
    Based on original function ``qnwgamma1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
a -= 1
maxit = 25
factor = -math.exp(gammaln(a+n) - gammaln(n) - gammaln(a+1))
nodes = np.zeros(n)
weights = np.zeros(n)
# Create nodes
for i in range(n):
# Reasonable starting values
if i == 0:
z = (1+a) * (3+0.92*a) / (1 + 2.4*n + 1.8*a)
elif i == 1:
z = z + (15 + 6.25*a) / (1 + 0.9*a + 2.5*n)
else:
j = i-1
z = z + ((1 + 2.55*j) / (1.9*j) + 1.26*j*a / (1 + 3.5*j)) * \
(z - nodes[j-1]) / (1 + 0.3*a)
# root finding iterations
its = 0
z1 = -10000
while abs(z - z1) > tol and its < maxit:
p1 = 1.0
p2 = 0.0
for j in range(1, n+1):
# Recurrance relation for Laguerre polynomials
p3 = p2
p2 = p1
p1 = ((2*j - 1 + a - z)*p2 - (j - 1 + a)*p3) / j
pp = (n*p1 - (n+a)*p2) / z
z1 = z
z = z1 - p1/pp
its += 1
if its == maxit:
raise ValueError('Failure to converge')
nodes[i] = z
weights[i] = factor / (pp*n*p2)
return nodes*b, weights
| bsd-3-clause | -5,027,179,250,230,418,000 | 25.135792 | 78 | 0.56982 | false |
mosbys/Clone | Cloning_v1/drive.py | 1 | 3838 | import argparse
import base64
import json
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
from random import randint
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
import cv2
# Fix error with Keras and TensorFlow
import tensorflow as tf
import matplotlib.pyplot as plt
tf.python.control_flow_ops = tf
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
iDebug = 0
def preprocess(image, top_offset=.375, bottom_offset=.125):
"""
    Applies the preprocessing pipeline to an image: crops `top_offset` and
    `bottom_offset` portions of the image and downscales the result to half
    its original width and height.
"""
top = int(top_offset * image.shape[0])
bottom = int(bottom_offset * image.shape[0])
image = image[top:-bottom, :]
newShape = image.shape
image= cv2.resize(image,(int(newShape[1]/2), int(newShape[0]/2)), interpolation = cv2.INTER_CUBIC)
return image
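# Illustrative example (assumes the simulator's 160x320x3 RGB frames, which is
# an assumption of this example, not something enforced by the script):
# cropping 37.5% off the top and 12.5% off the bottom leaves 80 rows, which the
# resize then halves, so the network sees 40x160x3 inputs.
#
#     frame = np.zeros((160, 320, 3), dtype=np.uint8)
#     assert preprocess(frame).shape == (40, 160, 3)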
@sio.on('telemetry')
def telemetry(sid, data):
# The current steering angle of the car
steering_angle = data["steering_angle"]
# The current throttle of the car
throttle = data["throttle"]
# The current speed of the car
speed = data["speed"]
# The current image from the center camera of the car
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
image_array = np.asarray(image)
image_array=preprocess(image_array)
newShape = image_array.shape
#image_array=cv2.resize(image_array,(newShape[1], newShape[0]),interpolation=cv2.INTER_CUBIC)
transformed_image_array = image_array[None, :, :, :]
if (iDebug==1):
plt.imshow(image_array)
plt.show()
#transformed_image_array2 = np.zeros([1,2*64,64,3])
#transformed_image_array2[0]=cv2.resize(transformed_image_array[0],(2*64, 64),interpolation=cv2.INTER_CUBIC)
# This model currently assumes that the features of the model are just the images. Feel free to change this.
steering_angle = float(model.predict(transformed_image_array, batch_size=1))
# The driving model currently just outputs a constant throttle. Feel free to edit this.
#steering_angle = randint(0,100)/100*randint(-1,1);
throttle = 0.2
print(steering_angle, throttle)
send_control(steering_angle, throttle)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit("steer", data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
}, skip_sid=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument('model', type=str,
help='Path to model definition json. Model weights should be on the same path.')
args = parser.parse_args()
with open(args.model, 'r') as jfile:
# NOTE: if you saved the file by calling json.dump(model.to_json(), ...)
# then you will have to call:
#
# model = model_from_json(json.loads(jfile.read()))\
#
# instead.
#model = model_from_json(jfile.read())
model = model_from_json(json.loads(jfile.read()))
model.compile("adam", "mse")
weights_file = args.model.replace('json', 'h5')
model.load_weights(weights_file)
# wrap Flask application with engineio's middleware
app = socketio.Middleware(sio, app)
# deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app) | gpl-2.0 | 8,273,103,825,005,780,000 | 32.285714 | 112 | 0.663627 | false |
EarthLifeConsortium/elc_api | swagger_server/elc/geog.py | 1 | 2706 | """Functions related to geographic coordinates and paleo conversions."""
def get_geog(coords, age, options):
"""Parse paleo geography parameters."""
from ..elc import ages
modern = [x.strip() for x in coords.split(',')]
if '' in modern or len(modern) != 2:
msg = 'Second parameter not found in pair: coords'
raise ValueError(400, msg)
for value in modern:
try:
float(value)
except ValueError as err:
msg = 'Non-numeric in parameter pair: coords'
raise ValueError(400, msg)
if any(x in age for x in [',', '.']):
msg = 'Single integer or geologic name required: age'
raise ValueError(400, msg)
# Sub-service requires ageunits as 'ma'
factor = ages.set_age_scaler(options, 'pbdb')
if age[0].isalpha():
try:
ea1, la1 = ages.resolve_age(age)
age = round((ea1 + la1) / 2)
except ValueError as err:
raise ValueError(err.args[0], err.args[1])
else:
age = round(int(age) * factor)
paleo, geog_ref = resolve_geog(lat=float(modern[0]),
lon=float(modern[1]),
mean_age=age)
paleo = [round(x, 4) for x in paleo]
modern = [round(float(x), 4) for x in modern]
return paleo, modern, geog_ref
def resolve_geog(lat, lon, mean_age):
"""Query GPlates model (hosted by MacroStrat) for paleocoordinates."""
import requests
from ..elc import config
url = 'https://macrostrat.org/gplates/reconstruct'
payload = {'lat': lat, 'lng': lon, 'age': mean_age}
try:
r = requests.get(url=url,
params=payload,
timeout=config.get('default', 'timeout'))
r.raise_for_status()
except requests.exceptions.HTTPError as e:
msg = '{0:s}'.format(r.json().get('error'))
raise ValueError(r.status_code, msg)
if r.json().get('features')[0]['geometry']:
coords = r.json().get('features')[0]['geometry']['coordinates']
geog_ref = r.json().get('properties')['model']['citation']
return coords, geog_ref
else:
        msg = 'Unavailable point or invalid WGS84 coords'
raise ValueError(400, msg)
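# Illustrative call (requires network access to the MacroStrat GPlates service;
# the coordinates and age below are arbitrary example values):
#
#     paleo_coords, citation = resolve_geog(lat=51.0, lon=4.5, mean_age=100)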
def set_location(wkt, db):
"""Return location constraint payload parameter."""
if 'POLYGON((' not in wkt:
msg = 'WKT bounding box must be in POLYGON((...)) format'
raise ValueError(400, msg)
if db == 'neotoma':
return {'loc': wkt}
elif db == 'pbdb':
return {'loc': wkt}
# NEW RESOURCE: Add databse specific WKT bounding box vocabulary here
else:
return {}
| apache-2.0 | -4,612,773,691,188,010,000 | 29.404494 | 74 | 0.570214 | false |
bisguzar/lolasistan | src/lang.py | 1 | 1635 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import config
connectFailed = {
'en': 'connect failed',
'tr': 'bağlantı başarısız'
}
connected = {
'en': '[System] Connected',
'tr': '[Sistem] Bağlanıldı'
}
authFailed = {
'en': 'auth failed',
'tr': 'giriş başarısız'
}
authSucces = {
'en': '[System] auth succesfully',
'tr': '[Sistem] giriş başarılı'
}
master = {
'en': 'You are not my master!',
'tr': 'Siz yönetici değilsiniz!'
}
newCommand = {
'en': 'Please enter command and index. Etc: !hello Hi, how can help you?',
'tr': 'Lütfen komut ve içeriğini girin. Örn: !selam Merhaba, nasıl yardımcı olabilirim?'
}
alreadyAdded = {
'en': 'This command already added.',
'tr': 'Bu komut zaten mevcut.'
}
deleteCom = {
'en': "If you want delete a command just write '{0}{1} <commandName>' (without ')",
'tr': "Komut silmek için '{0}{1} <commandName>' yazın. (' olmadan)"
}
commandNotFound = {
'en': 'Command {} not found.',
'tr': '{} komutu bulunamadı.'
}
commandDeleted = {
'en': 'Command {0} has been deleted!',
'tr': '{} komutu başarıyla silindi!'
}
commandAdded = {
'en': "Command has ben added! You can test it, just write '{}' (without ')",
'tr': "Komut eklendi! '{}' yazarak test edebilirsiniz. (' olmadan)"
}
commandCreator = {
'en': "Command {} has ben added! Creator: ",
'tr': "{} komutu oluşturuldu! Oluşturan: "
}
helpCommand = {
'en': 'help',
'tr': 'yardım'
}
usableComms = {
'en': 'Usable commands: \n',
'tr': 'Kullanabileceğiniz komutlar: \n'
}
noIdea = {
'en': "Sorry but no I idea about '{}'.",
'tr': "Pardon, '{}' hakkında bir bilgim yok."
}
| gpl-3.0 | -653,396,016,554,987,500 | 19.253165 | 89 | 0.61375 | false |
aplicatii-romanesti/allinclusive-kodi-pi | .kodi/addons/plugin.video.salts/scrapers/watch8now_scraper.py | 1 | 3879 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import re
import urlparse
import urllib
import xbmcaddon
from salts_lib import dom_parser
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
BASE_URL = 'http://watch8now.so'
class Watch8Now_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = xbmcaddon.Addon().getSetting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'Watch8Now'
def resolve_link(self, link):
html = self._http_get(link, cache_limit=.5)
match = re.search('<iframe[^>]*src="([^"]+)', html, re.I)
if match:
return match.group(1)
else:
match = re.search('Nothing in HERE<br>([^<]+)', html, re.I)
if match:
return match.group(1).strip()
return link
def format_source_label(self, item):
label = '[%s] %s ' % (item['quality'], item['host'])
return label
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
for table_cell in dom_parser.parse_dom(html, 'td', {'class': 'domain'}):
match = re.search('href="([^"]+)(?:[^>]+>){2}\s*([^<]+)', table_cell)
if match:
link, host = match.groups()
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': self._get_quality(video, host, QUALITIES.HIGH), 'views': None, 'rating': None, 'url': link, 'direct': False}
hosters.append(hoster)
return hosters
def get_url(self, video):
return super(Watch8Now_Scraper, self)._default_get_url(video)
def _get_episode_url(self, show_url, video):
episode_pattern = 'href="([^"]+[sS]%s[eE]%s\.html)"' % (video.season, video.episode)
        title_pattern = 'href="([^"]+[sS]\d+[eE]\d+\.html)"(?:[^>]+>){6}([^<]+)'
return super(Watch8Now_Scraper, self)._default_get_episode_url(show_url, video, episode_pattern, title_pattern)
def search(self, video_type, title, year):
search_url = urlparse.urljoin(self.base_url, '/search?q=')
search_url += urllib.quote_plus(title)
html = self._http_get(search_url, cache_limit=8)
results = []
for item in dom_parser.parse_dom(html, 'h4', {'class': 'media-heading'}):
match = re.search('href="([^"]+)">([^<]+)', item)
if match:
url, match_title = match.groups()
result = {'url': url.replace(self.base_url, ''), 'title': match_title, 'year': ''}
results.append(result)
return results
def _http_get(self, url, data=None, cache_limit=8):
return super(Watch8Now_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, data=data, cache_limit=cache_limit)
| apache-2.0 | -4,866,924,595,996,970,000 | 38.181818 | 199 | 0.601444 | false |
jstasiak/travis-solo | setup.py | 1 | 2016 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function
from setuptools import setup
from os.path import abspath, dirname, join
PROJECT_ROOT = abspath(dirname(__file__))
with open(join(PROJECT_ROOT, 'README.rst')) as f:
readme = f.read()
with open(join(PROJECT_ROOT, 'travis_solo.py')) as f:
version_line = [line for line in f.readlines() if line.startswith('__version__')][0]
version = version_line.split('=')[1].strip().strip("'")
install_requires = [
'PyYAML',
'termcolor',
]
try:
import argparse # noqa
except ImportError:
install_requires.append('argparse')
setup(
name='travis-solo',
version=version,
description='Local Travis build runner',
long_description=readme,
author='Jakub Stasiak',
url='https://github.com/jstasiak/travis-solo',
author_email='[email protected]',
py_modules=['travis_solo'],
platforms=['unix', 'linux', 'osx'],
license='MIT',
install_requires=install_requires,
entry_points=dict(
console_scripts=[
'travis-solo = travis_solo:main',
],
),
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
)
| mit | -6,734,856,298,556,025,000 | 31 | 88 | 0.613095 | false |
SymbiFlow/edalize | edalize/yosys.py | 1 | 4456 | # Copyright edalize contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
import logging
import os.path
from edalize.edatool import Edatool
logger = logging.getLogger(__name__)
class Yosys(Edatool):
argtypes = ['vlogdefine', 'vlogparam']
@classmethod
def get_doc(cls, api_ver):
if api_ver == 0:
return {'description' : "Open source synthesis tool targeting many different FPGAs",
'members' : [
{'name' : 'arch',
'type' : 'String',
'desc' : 'Target architecture. Legal values are *xilinx*, *ice40* and *ecp5*'},
{'name' : 'output_format',
'type' : 'String',
'desc' : 'Output file format. Legal values are *json*, *edif*, *blif*'},
{'name' : 'yosys_as_subtool',
'type' : 'bool',
'desc' : 'Determines if Yosys is run as a part of bigger toolchain, or as a standalone tool'},
{'name' : 'makefile_name',
'type' : 'String',
'desc' : 'Generated makefile name, defaults to $name.mk'},
{'name' : 'script_name',
'type' : 'String',
'desc' : 'Generated tcl script filename, defaults to $name.mk'},
],
'lists' : [
{'name' : 'yosys_synth_options',
'type' : 'String',
'desc' : 'Additional options for the synth command'},
]}
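    # Illustrative tool_options dict matching the members/lists documented in
    # get_doc() above (the concrete values are assumptions for the example,
    # not defaults of this tool):
    #
    #     tool_options = {
    #         'arch': 'ice40',
    #         'output_format': 'json',
    #         'yosys_synth_options': ['-abc9'],
    #     }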
def configure_main(self):
# write Yosys tcl script file
(src_files, incdirs) = self._get_fileset_files()
part_of_toolchain = self.tool_options.get('yosys_as_subtool', False)
file_table = []
for f in src_files:
cmd = ""
if f.file_type.startswith('verilogSource'):
cmd = 'read_verilog'
elif f.file_type.startswith('systemVerilogSource'):
cmd = 'read_verilog -sv'
elif f.file_type == 'tclSource':
cmd = 'source'
else:
continue
file_table.append(cmd + ' {' + f.name + '}')
verilog_defines = []
for key, value in self.vlogdefine.items():
verilog_defines.append('{{{key} {value}}}'.format(key=key, value=value))
verilog_params = []
for key, value in self.vlogparam.items():
if type(value) is str:
value = "{\"" + value + "\"}"
_s = r"chparam -set {} {} {}"
verilog_params.append(_s.format(key,
self._param_value_str(value),
self.toplevel))
output_format = self.tool_options.get('output_format', 'blif')
arch = self.tool_options.get('arch', None)
if not arch:
logger.error("ERROR: arch is not defined.")
makefile_name = self.tool_options.get('makefile_name', self.name + '.mk')
        script_name = self.tool_options.get('script_name', self.name + '.tcl')
template_vars = {
'verilog_defines' : "{" + " ".join(verilog_defines) + "}",
'verilog_params' : "\n".join(verilog_params),
'file_table' : "\n".join(file_table),
'incdirs' : ' '.join(['-I'+d for d in incdirs]),
'top' : self.toplevel,
'synth_command' : "synth_" + arch,
'synth_options' : " ".join(self.tool_options.get('yosys_synth_options', '')),
'write_command' : "write_" + output_format,
'default_target' : output_format,
'edif_opts' : '-pvector bra' if arch=='xilinx' else '',
'script_name' : script_name,
'name' : self.name
}
self.render_template('yosys-script-tcl.j2',
script_name,
template_vars)
makefile_name = self.name + '.mk' if part_of_toolchain else 'Makefile'
self.render_template('yosys-makefile.j2',
makefile_name,
template_vars)
| bsd-2-clause | -3,073,453,257,561,716,000 | 41.037736 | 119 | 0.467235 | false |
Hpower96/Power | backend/thread.py | 1 | 1338 | #!/usr/bin/env python
from multiprocessing import Process,Lock
import sys,os,time
script = sys.argv[0]
list = [1000,1200,1400,1600,1800,10000]
Proce_num_list = []
def worker(num):
try:
#print p.name, p.pid, os.getppid()
if int(num) == int(list[-1]):
print 'Check out automatically exit.'
os.system('kill -9 %s' % os.getppid())
#sys.exit()
elif num in list:
print '---------------------------------'
Proce_num = os.popen('ps -ef|grep -v grep |grep %s |wc -l' % script).read()
print 'The %s largest number of process: \033[;32m%s\033[0m' % (num ,Proce_num)
#Proce_num_list += int(Proce_num)
Proce_num_list.append(int(Proce_num))
#Proce_num_list[num] = int(Proce_num)
#print '---------------------------------'
#print Proce_num_list,'============='
#print type(Proce_num_list),'============='
time.sleep(10)
except (KeyboardInterrupt, OSError, AttributeError):
sys.exit()
if __name__ == "__main__":
num = 0
while True:
num = num + 1
Proce_num_list = []
try:
p = Process(target=worker ,args=(num,))
#print p.name, p.pid
p.start()
except:
            p.terminate()
| gpl-3.0 | 2,627,051,263,523,876,000 | 28.733333 | 91 | 0.480568 | false |
johanvdw/niche_vlaanderen | niche_vlaanderen/spatial_context.py | 1 | 7681 | from affine import Affine
from textwrap import dedent
import warnings
class SpatialContextError(Exception):
"""
"""
class SpatialContext(object):
"""Stores the spatial context of the grids in niche
This class is based on the rasterio model of a grid.
Attributes
----------
transform: Affine
        Matrix that contains the affine transformation of the plane to
        convert grid coordinates to real world coordinates.
        https://github.com/sgillies/affine
width, height: int
Integer numbers containing the width and height of the raster
crs: rasterio.CRS
Container class for coordinate reference system info
"""
def __init__(self, dst):
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
if isinstance(dst.transform, Affine):
self.transform = dst.transform
else:
# for compatibility with rasterio 0.x
self.transform = dst.affine
self.width = int(dst.width)
self.height = int(dst.height)
# only occurs on Python 2
if dst.crs is None: # pragma: no cover
self.crs = ""
elif isinstance(dst.crs, basestring):
self.crs = dst.crs
else:
self.crs = dst.crs.to_string()
if self.transform[0] < 0:
raise SpatialContextError( # pragma: no cover
"Grid is indexed right to left. This is very uncommon."
"Try resampling your grid in GIS prior to using in Niche."
)
if self.transform[4] > 0:
raise SpatialContextError(
"Grid is indexed top to bottom. This is very uncommon."
"Try resampling your grid in GIS prior to using in Niche."
)
def __repr__(self):
s = """\
Extent: %s
%s
width: %d, height: %d
Projection: %s"""
s = dedent(s) % (self.extent, self.transform.__repr__(),
self.width, self.height, self.crs)
return s
def compare(self, other):
"""Compare two SpatialContexts
        Equal to: small differences (< 1 cm) are allowed
"""
if self.width != other.width:
return False
if self.height != other.height:
return False
if self.crs != other.crs:
            if self.crs == '' or other.crs == '':
print("Ignoring missing CRS in comparison")
else:
print("Warning: CRS definitions are not equal!")
# TODO: we should probably look at the strict validation here.
# currently disabled until we have a better way to detect
# l72 variants
# return False
if self.transform.almost_equals(other.transform, precision=0.01):
return True
else:
return False
def __eq__(self, other):
"""Compare two SpatialContexts
        Equal to: small differences (< 1 cm) are allowed
"""
return self.compare(other)
def __ne__(self, other):
""" Compare two SpatialContexts
Not equal to: Small differences are allowed
"""
return not self.compare(other)
def check_overlap(self, new_sc):
"""Checks whether two SpatialContexts overlap
Overlapping spatial contexts are SpatialContexts with the same grid
dimensions (no resampling is needed to convert them).
Overlapping SpatialContexts can be used to intersect (set_overlap) or
can be used to define a read window.
A maximal offset of 1cm (0.01m) is allowed.
"""
if not ((abs(self.transform[0] - new_sc.transform[0]) < 0.01)
and (self.transform[1] == new_sc.transform[1])
and (self.transform[3] == new_sc.transform[3])
and (abs(self.transform[4] - new_sc.transform[4]) < 0.01)):
print("error: different grid size or orientation")
return False
# check cells overlap
dgx = (~self.transform)[2] - (~new_sc.transform)[2]
dgy = (~self.transform)[5] - (~new_sc.transform)[5]
# if these differences are not integer numbers, cells do not overlap
# we allow a difference of 0.01 m
if (abs(dgx - round(dgx)) > 0.01) or (abs(dgy - round(dgy)) > 0.01):
print("cells do not overlap")
print(dgx, dgy)
return False
else:
return True
@property
def extent(self):
extent_self = (self.transform) * (0, 0), \
(self.transform) * (self.width, self.height)
return extent_self
def set_overlap(self, new_sc):
""" Sets the spatial context to the overlap of both SpatialContexts
Parameters
==========
new_sc: SpatialContext
"""
# Check orientation and cell size are equal
if not self.check_overlap(new_sc):
raise SpatialContextError("no overlap in extent")
# determine the extent in the old and new system
extent_self = self.extent
extent_new = new_sc.extent
# The starting point of the combined raster is the left coordinate
# (if the 0th coefficient of transform is positive). and the bottom
# coordinate (if the 4th coefficient is negative)
# Note that usually the 0th coefficient is positive and the 4th
# negative.
extent_x = (max(extent_self[0][0], extent_new[0][0]),
min(extent_self[1][0], extent_new[1][0]))
extent_y = (min(extent_self[0][1], extent_new[0][1]),
max(extent_self[1][1], extent_new[1][1]))
self.width = round((extent_x[1] - extent_x[0]) / self.transform[0])
self.height = round((extent_y[1] - extent_y[0]) / self.transform[4])
self.transform = \
Affine(self.transform[0], self.transform[1], extent_x[0],
self.transform[3], self.transform[4], extent_y[0])
def get_read_window(self, new_sc):
"""Gets the read window that overlap with a different SpatialContext
Gets the window to be read from a new SpatialContext to
overlap with the current (equally large or larger) SpatialContext
Parameters
==========
new_sc: SpatialContext
Spatial context for which a read window is to be determined,
based on the extent of the overall (equally large or larger
base SpatialContext)
"""
if not self.check_overlap(new_sc):
raise SpatialContextError(
"Error: No overlap between both Spatial contexts."
)
# Get minimum and maximum position in the new grid system
gminxy = (~new_sc.transform) * ((0, 0) * self.transform)
gmaxxy = (~new_sc.transform) * (
(self.width, self.height) * self.transform)
# we can safely round here because we checked overlap before
# (differences are smaller than the tolerance
window = (round(gminxy[1], 2), round(gmaxxy[1], 2)),\
(round(gminxy[0], 2), round(gmaxxy[0], 2))
if window[0][0] < 0 or window[1][0] < 0 or window[1][1] > new_sc.width\
or window[1][0] > new_sc.height:
raise SpatialContextError(
"Error: new SpatialContexts is larger than current context.\n"
"Can not determine a read window")
return window
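    # Illustrative usage (`sc` is an existing SpatialContext and the file name
    # is a placeholder): given two rasters on the same grid, compute the
    # (row, col) window of `src` that covers the extent of `sc`.
    #
    #     import rasterio
    #     with rasterio.open("other.tif") as src:
    #         window = sc.get_read_window(SpatialContext(src))
    #     # window == ((row_min, row_max), (col_min, col_max))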
@property
def cell_area(self):
return abs(self.transform[0] * self.transform[4])
| mit | 4,143,655,543,510,224,000 | 32.395652 | 79 | 0.573102 | false |
pdf/beets | setup.py | 1 | 2918 | #!/usr/bin/env python
# This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import os
import sys
import subprocess
import shutil
from setuptools import setup
def _read(fn):
path = os.path.join(os.path.dirname(__file__), fn)
return open(path).read()
# Build manpages if we're making a source distribution tarball.
if 'sdist' in sys.argv:
# Go into the docs directory and build the manpage.
docdir = os.path.join(os.path.dirname(__file__), 'docs')
curdir = os.getcwd()
os.chdir(docdir)
try:
subprocess.check_call(['make', 'man'])
finally:
os.chdir(curdir)
# Copy resulting manpages.
mandir = os.path.join(os.path.dirname(__file__), 'man')
if os.path.exists(mandir):
shutil.rmtree(mandir)
shutil.copytree(os.path.join(docdir, '_build', 'man'), mandir)
setup(name='beets',
version='1.1.0-beta.3',
description='music tagger and library organizer',
author='Adrian Sampson',
author_email='[email protected]',
url='http://beets.radbox.org/',
license='MIT',
platforms='ALL',
long_description=_read('README.rst'),
test_suite='test.testall.suite',
include_package_data=True, # Install plugin resources.
packages=[
'beets',
'beets.ui',
'beets.autotag',
'beets.util',
'beetsplug',
'beetsplug.bpd',
'beetsplug.web',
'beetsplug.lastgenre',
],
namespace_packages=['beetsplug'],
entry_points={
'console_scripts': [
'beet = beets.ui:main',
],
},
install_requires=[
'mutagen>=1.20',
'munkres',
'unidecode',
'musicbrainzngs>=0.2',
'pyyaml',
]
+ (['colorama'] if (sys.platform == 'win32') else [])
+ (['ordereddict'] if sys.version_info < (2, 7, 0) else []),
classifiers=[
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
'Environment :: Web Environment',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
)
| mit | -138,347,469,473,975,420 | 30.376344 | 71 | 0.605552 | false |
dokterbob/django-shopkit | shopkit/core/utils/fields.py | 1 | 2814 | # Copyright (C) 2010-2011 Mathijs de Bruin <[email protected]>
#
# This file is part of django-shopkit.
#
# django-shopkit is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from decimal import Decimal
from django.db.models.fields import DecimalField
class MinMaxDecimalField(DecimalField):
"""
`DecimalField` subclass which allows specifying a minimum and maximum
value. Takes two extra optional parameters, to be specified as a Decimal
or string:
* `max_value`
* `min_value`
"""
description = 'DecimalField subclass which allows specifying a minimum \
and maximum value.'
def __init__(self, **kwargs):
self.max_value = kwargs.pop('max_value', None)
self.min_value = kwargs.pop('min_value', None)
super(MinMaxDecimalField, self).__init__(**kwargs)
def formfield(self, **kwargs):
        if self.max_value is not None:
            kwargs['max_value'] = Decimal(self.max_value)
        if self.min_value is not None:
            kwargs['min_value'] = Decimal(self.min_value)
return super(MinMaxDecimalField, self).formfield(**kwargs)
class PercentageField(MinMaxDecimalField):
"""
Subclass of `DecimalField` with sensible defaults for percentage
discounts:
* `max_value=100`
* `min_value=0`
* `decimal_places=0`
* `max_digits=3`
"""
description = 'Subclass of DecimalField with sensible defaults for \
percentage discounts.'
def __init__(self, **kwargs):
kwargs['max_value'] = kwargs.get('max_value', Decimal('100'))
kwargs['min_value'] = kwargs.get('min_value', Decimal('0'))
kwargs['decimal_places'] = kwargs.get('decimal_places', 0)
kwargs['max_digits'] = kwargs.get('max_digits', 3)
super(PercentageField, self).__init__(**kwargs)
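# Illustrative usage sketch (added for clarity, not part of the original
# module). "Voucher" and its field names are assumptions made purely for the
# example; any Django model would use these fields the same way:
#
#     from django.db import models
#     from shopkit.core.utils.fields import MinMaxDecimalField, PercentageField
#
#     class Voucher(models.Model):
#         # 0-100, no decimal places, thanks to PercentageField's defaults
#         discount_percentage = PercentageField(default=0)
#         # custom bounds passed as strings, converted to Decimal for the form
#         max_order_total = MinMaxDecimalField(
#             min_value='0', max_value='10000', max_digits=7, decimal_places=2)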
# If South is installed, add introspection rules
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^shopkit\.core\.utils\.fields\.MinMaxDecimalField"])
add_introspection_rules([], ["^shopkit\.core\.utils\.fields\.PercentageField"])
except ImportError:
pass
| agpl-3.0 | 908,442,149,540,644,500 | 32.105882 | 86 | 0.680526 | false |
DataDog/integrations-core | nfsstat/datadog_checks/nfsstat/config_models/shared.py | 1 | 1331 | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from __future__ import annotations
from typing import Optional
from pydantic import BaseModel, root_validator, validator
from datadog_checks.base.utils.functions import identity
from datadog_checks.base.utils.models import validation
from . import defaults, validators
class SharedConfig(BaseModel):
class Config:
allow_mutation = False
autofs_enabled: Optional[bool]
nfsiostat_path: Optional[str]
service: Optional[str]
@root_validator(pre=True)
def _initial_validation(cls, values):
return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values))
@validator('*', pre=True, always=True)
def _ensure_defaults(cls, v, field):
if v is not None or field.required:
return v
return getattr(defaults, f'shared_{field.name}')(field, v)
@validator('*')
def _run_validations(cls, v, field):
if not v:
return v
return getattr(validators, f'shared_{field.name}', identity)(v, field=field)
@root_validator(pre=False)
def _final_validation(cls, values):
return validation.core.finalize_config(getattr(validators, 'finalize_shared', identity)(values))
| bsd-3-clause | 6,975,200,206,133,944,000 | 29.25 | 108 | 0.695718 | false |
clchiou/garage | py/nanomsg/tests/test_errors.py | 1 | 1124 | import unittest
from nanomsg import errors
from nanomsg.constants import Error
class ErrorsTest(unittest.TestCase):
def test_errors(self):
varz = vars(errors)
lookup_table = varz['_ERRORS']
for error in Error:
self.assertIn(error.name, errors.__all__)
self.assertIn(error.name, varz)
exc_class = varz[error.name]
self.assertTrue(issubclass(exc_class, errors.NanomsgError))
self.assertIs(exc_class, lookup_table[error])
# Look up by Error.
exc = errors.NanomsgError.make(error)
self.assertIsInstance(exc, exc_class)
self.assertEqual(error, exc.error)
# Look up by int errno value.
exc = errors.NanomsgError.make(error.value)
self.assertIsInstance(exc, exc_class)
self.assertEqual(error, exc.error)
# No such errno.
exc = errors.NanomsgError.make(999)
self.assertIsInstance(exc, errors.NanomsgError)
self.assertEqual(999, exc.error)
if __name__ == '__main__':
unittest.main()
| mit | 8,170,660,661,994,059,000 | 26.414634 | 71 | 0.598754 | false |
chroth/domainhog | main.py | 1 | 1860 | import sys
import logging
import data_handler
def update_data(context):
print 'Update data'
data_handler.update()
return 0, ""
def search(context):
if not data_handler.has_data():
context["logger"].debug("Data is missing")
update_data(context)
search_word = context['arguments'][1]
print 'Starting search for ' + search_word
all_tlds = data_handler.get_tlds()
hits = 0
for tld_item in all_tlds:
domain_suggestion = tld_item.get_suggestion(search_word)
if domain_suggestion:
print domain_suggestion
hits = hits + 1
if hits == 0:
print 'No hits'
return 0, ""
def show_help(context):
context["logger"].debug("Display API help")
msg = "Domainhog Commands:\n"
keys = sorted(context['api'].keys())
for k in keys:
msg += " {:17s} {:s}\n".format(k, context['api'][k][1])
return 0, msg.strip()
if __name__ == "__main__":
#default command
command = "help"
try:
command = sys.argv[1]
except IndexError as e:
pass
# setup logger
FORMAT = "%(asctime)s %(levelname)s %(funcName)s:%(lineno)s ~ %(message)s"
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
logger = logging.getLogger(__name__)
#available commands
api = {
'update': (update_data, "Updates tld information"),
'search': (search, "Searches for an available domain name"),
'help': (show_help, "Show available commands"),
}
#context for all commands
context = {
'logger': logger,
'command': command,
'arguments': sys.argv[1:],
'api': api
}
    #execute, returns code (!= 0 if failed) and a message
if not command in api:
command = 'help'
code, msg = api[command][0](context)
print msg
sys.exit(code)
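# Illustrative invocations (added for clarity, not part of the original
# script); "example" is just a placeholder search term:
#   python main.py update          # refresh the cached TLD data
#   python main.py search example  # print available domain suggestions
#   python main.py help            # list the commands above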
| mit | -1,027,535,432,958,796,500 | 23.473684 | 78 | 0.585484 | false |
Wikidata/QueryAnalysis | tools/QueryAnalysis.py | 1 | 7379 | import argparse
import calendar
from datetime import datetime
import glob
import os
import shutil
import subprocess
import sys
import gzip
import unifyQueryTypes
from utility import utility
import config
os.nice(19)
months = {'january': [1, 31],
'february': [2, 28],
'march': [3, 31],
'april': [4, 30],
'may': [5, 31],
'june': [6, 30],
'july': [7, 31],
'august': [8, 31],
'september': [9, 30],
'october': [10, 31],
'november': [11, 30],
'december': [12, 31]}
parser = argparse.ArgumentParser("This script extracts the raw log data (if "
+ "it was not already done), processes them"
+ " using the java application and unifies "
+ "the query types.")
parser.add_argument("--ignoreLock", "-i", help="Ignore locked file and "
+ "execute anyways", action="store_true")
parser.add_argument("--threads", "-t", default=6, type=int, help="The number "
+ "of threads to run the java program with (default 7).")
parser.add_argument("--logging", "-l", help="Enables file logging.",
action="store_true")
parser.add_argument("--noBotMetrics", "-b", help="Disables metric calculation"
+ " for bot queries.", action="store_true")
parser.add_argument("--noDynamicQueryTypes", "-d", help="Disables dynamic "
+ "generation of query types.", action="store_true")
parser.add_argument("--noGzipOutput", "-g", help="Disables gzipping of the "
+ "output files.", action="store_true")
parser.add_argument("--noExampleQueriesOutput", "-e", help="Disables the "
+ "matching of example queries.", action="store_true")
parser.add_argument("--withUniqueQueryDetection", "-u", help="Enable unique query detection", action="store_true")
parser.add_argument("--dbLocation", "-p", type = str, default = config.dbLocation, help = "The path of the uniqueQueriesMapDb file.")
parser.add_argument("--queryTypeMapLocation", "-q", type = str, default = config.queryTypeMapDbLocation, help = "The path of the query type map db file. Default is in the working directory.")
parser.add_argument("--monthsFolder", "-m", default=config.monthsFolder,
type=str,
help="The folder in which the months directory are "
+ "residing.")
parser.add_argument("--year", "-y", default=datetime.now().year, type=int,
help="The year to be processed (default current year).")
parser.add_argument("months", type=str, help="The months to be processed")
# These are the field we extract from wmf.wdqs_extract that form the raw
# log data. They are not configurable via argument because the java program
# does not detect headers and thus depends on this specific order.
fields = ["uri_query", "uri_path", "user_agent", "ts", "agent_type",
"hour", "http_status"]
header = ""
for field in fields:
header += field + "\t"
header = header[:-1] + "\n"
if (len(sys.argv[1:]) == 0):
parser.print_help()
parser.exit()
args = parser.parse_args()
if calendar.isleap(args.year):
months['february'][1] = 29
for monthName in args.months.split(","):
if os.path.isfile(utility.addMissingSlash(args.monthsFolder)
+ utility.addMissingSlash(monthName) + "locked") \
and not args.ignoreLock:
print "ERROR: The month " + monthName + " is being edited at the " \
+ "moment. Use -i if you want to force the execution of this script."
sys.exit()
month = utility.addMissingSlash(os.path.abspath(utility.addMissingSlash(args.monthsFolder)
+ utility.addMissingSlash(monthName)))
processedLogDataDirectory = month + "processedLogData/"
rawLogDataDirectory = month + "rawLogData/"
tempDirectory = rawLogDataDirectory + "temp/"
# If the month directory does not exist it is being created along with
# the directories for raw and processed log data.
if not os.path.exists(month):
print("Starting data extraction from wmf.wdqs_extract for "
+ monthName + ".")
os.makedirs(month)
os.makedirs(processedLogDataDirectory)
os.makedirs(rawLogDataDirectory)
# For each day we send a command to hive that extracts all entries for
# this day (in the given month and year) and writes them to temporary
# files.
for day in xrange(1, months[monthName][1] + 1):
arguments = ['hive', '-e']
os.makedirs(tempDirectory)
hive_call = 'insert overwrite local directory \'' + tempDirectory \
+ '\' row format delimited fields terminated ' \
+ 'by \'\\t\' select '
# We add all the fields to the request
for field in fields:
hive_call += field + ", "
hive_call = hive_call[:-2] + " "
hive_call += ' from wmf.wdqs_extract where uri_query<>"" ' \
+ 'and year=\'' + str(args.year) + '\' and month=\'' \
+ str(months[monthName][0]) + '\' and day=\'' + str(day) + '\''
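            # For illustration (assumed values, added for clarity): with
            # year=2018, monthName='january' and day=5 the generated statement
            # looks roughly like
            #   insert overwrite local directory '<month>/rawLogData/temp/'
            #   row format delimited fields terminated by '\t'
            #   select uri_query, uri_path, user_agent, ts, agent_type, hour,
            #          http_status
            #   from wmf.wdqs_extract
            #   where uri_query<>"" and year='2018' and month='1' and day='5'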
arguments.append(hive_call)
if subprocess.call(arguments) != 0:
print("ERROR: Raw data for month " + monthName + " does not "
+ "exist but could not be extracted using hive.")
sys.exit(1)
# The content of the temporary files is then copied to the actual
# raw log data file (with added headers)
with gzip.open(rawLogDataDirectory + "QueryCnt"
+ "%02d"%day + ".tsv.gz", "wb") as dayfile:
dayfile.write(header)
for filename in glob.glob(tempDirectory + '*'):
with open(filename) as temp:
for line in temp:
dayfile.write(line)
shutil.rmtree(tempDirectory)
# We build the call to execute the java application with the location of
# the files, the number of threads to use and any optional arguments needed
mavenCall = ['mvn', 'exec:java@QueryAnalysis']
mavenArguments = '-Dexec.args=-w ' + month + ' -t ' + str(args.threads) + ' -p ' + args.dbLocation + " -q " + args.queryTypeMapLocation
if args.logging:
mavenArguments += " -l"
if args.noBotMetrics:
mavenArguments += " -b"
if args.noDynamicQueryTypes:
mavenArguments += " -d"
if args.noGzipOutput:
mavenArguments += " -g"
if args.noExampleQueriesOutput:
mavenArguments += " -e"
if args.withUniqueQueryDetection:
mavenArguments += " -u"
mavenCall.append(mavenArguments)
owd = os.getcwd()
os.chdir("..")
print "Starting data processing using QueryAnalysis for " + monthName + "."
if subprocess.call(['mvn', 'clean', 'package']) != 0:
print "ERROR: Could not package the java application."
sys.exit(1)
if subprocess.call(mavenCall) != 0:
print("ERROR: Could not execute the java application. Check the logs "
+ "for details or rerun this script with -l to generate logs.")
sys.exit(1)
os.chdir(owd) | apache-2.0 | -7,126,092,825,844,727,000 | 40.460674 | 191 | 0.589511 | false |
beiko-lab/gengis | bin/Lib/site-packages/scipy/sparse/linalg/eigen/lobpcg/tests/large_scale.py | 1 | 1436 | from __future__ import division, print_function, absolute_import
from scipy import array, arange, ones, sort, cos, pi, rand, \
set_printoptions, r_
from scipy.sparse.linalg import lobpcg
from scipy import sparse
from pylab import loglog, show, xlabel, ylabel, title
set_printoptions(precision=8,linewidth=90)
import time
def sakurai(n):
""" Example taken from
T. Sakurai, H. Tadano, Y. Inadomi and U. Nagashima
A moment-based method for large-scale generalized eigenvalue problems
Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004) """
A = sparse.eye(n, n)
d0 = array(r_[5,6*ones(n-2),5])
d1 = -4*ones(n)
d2 = ones(n)
B = sparse.spdiags([d2,d1,d0,d1,d2],[-2,-1,0,1,2],n,n)
k = arange(1,n+1)
w_ex = sort(1./(16.*pow(cos(0.5*k*pi/(n+1)),4))) # exact eigenvalues
return A,B, w_ex
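# Note added for clarity (not in the original script): w_ex above encodes the
# closed-form eigenvalues of the Sakurai pencil,
#   lambda_i = 1 / (16 * cos(i*pi / (2*(n+1)))**4),  i = 1..n,
# and the lobpcg result below is compared against the m smallest of them.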
m = 3 # Blocksize
#
# Large scale
#
n = 2500
A,B, w_ex = sakurai(n) # Sakurai problem
X = rand(n,m)
data = []
tt = time.clock()
eigs,vecs, resnh = lobpcg(X,A,B, residualTolerance=1e-6, maxIterations=500, retResidualNormsHistory=1)
data.append(time.clock()-tt)
print('Results by LOBPCG for n='+str(n))
print()
print(eigs)
print()
print('Exact eigenvalues')
print()
print(w_ex[:m])
print()
print('Elapsed time',data[0])
loglog(arange(1,n+1),w_ex,'b.')
xlabel(r'Number $i$')
ylabel(r'$\lambda_i$')
title('Eigenvalue distribution')
show()
| gpl-3.0 | 3,670,805,974,696,404,500 | 24.592593 | 102 | 0.627437 | false |
t-animal/helfertool | web/views/helfer.py | 1 | 11105 | # -*- coding: utf-8 -*-
from web.utils import render_template, expose, url_for, run_command_output, invalid_form_error
from web.session_utils import create_session, is_user_admin
from web.login_utils import require_login, require_admin, check_login_credentials, _find_userid_byname, _find_username_byid
from werkzeug import redirect
from werkzeug.wrappers import Request, Response
import web.utils
import db
import config
from Crypto.Random import random
from passlib.hash import sha256_crypt
import datetime
## for iCal export
from icalendar import Calendar, Event
import pytz
## register a new helper
@expose('/helfer/new')
def neuerhelfer(request):
if not is_user_admin(request.session):
return redirect('/')
if request.method == 'GET':
		## show the form, since there is no POST data yet
return render_template('helfer_anlegen.xml',
session=request.session)
## we got a request, handle it
username = request.form.get('username')
password = request.form.get('password')
email = request.form.get('email')
mobile = request.form.get('mobile')
comment = request.form.get('comment')
tshirt = request.form.get('shirt_size')
pullover = request.form.get('pullover_size')
want_participant_shirt = request.form.get('want_participant_shirt')
	## checked individually so the error messages can be more specific
if not db.sane_str(username, True):
return invalid_form_error(request.session,
msg=u"username leer oder enthält ungültige zeichen")
if not db.sane_str(password, True):
return invalid_form_error(request.session,
msg=u"passwort leer oder enthält ungültige zeichen")
if not db.sane_str(email):
return invalid_form_error(request.session,
msg=u"email enthält ungültige zeichen")
if not db.sane_str(mobile):
return invalid_form_error(request.session,
msg=u"handynummer enthält ungültige zeichen")
if not db.sane_str(comment):
return invalid_form_error(request.session,
msg=u"kommentartext enthält ungültige zeichen")
if not db.sane_str(tshirt):
return invalid_form_error(request.session,
msg=u"T-Shirt-Größe enthält ungültige Zeichen")
if not db.sane_str(pullover):
return invalid_form_error(request.session,
msg=u"Pullover-Größe enthält ungültige Zeichen")
db_uid, found = _find_userid_byname(username)
if found:
return invalid_form_error(request.session,
msg=u'ein benutzer mit diesem name existiert bereits. ich fürchte du musst dir einen anderen namen ausdenken')
crypted = sha256_crypt.encrypt(password)
userid = db.insert("INSERT INTO person (username, password, email, mobile, comment, is_admin, tshirt_size, pullover_size, want_participant_shirt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", (username, crypted, email, mobile, comment, 0, tshirt, pullover, want_participant_shirt))
	## the user is also logged in right away after registering
create_session(request.session, userid, username, False)
return render_template('helfer_eingetragen_infos.xml',
username=username, email = email, session=request.session)
## a single helper -- what they do, plus a delete button if needed
@require_login
@expose('/helfer/<int:helferid>')
def helferinfo(request, helferid):
admin_mode = is_user_admin(request.session)
if (request.session["userid"] != helferid) and not admin_mode:
return render_template('error.xml', error_short='unauthorized',
error_long="du kannst nur deine eigenen schichten anschauen. entweder du bist nicht eingeloggt, oder du versuchst schichten anderer leute anzuzeigen",
config=config, session=request.session)
if admin_mode:
helfer_name, found = _find_username_byid(helferid)
else:
helfer_name=request.session['username']
helfer = db.select('''SELECT
id,
username,
signed_fire,
signed_hygiene,
email,
mobile,
want_participant_shirt AS shirt
FROM
person
WHERE
id=?''', (helferid,))
if len(helfer) != 1:
return render_template('error.xml', error_short='invalid user',
error_long="irgendwas ist faul. du bist als benutzer #%d eingeloggt, aber nicht in der db." % (helferid,),
config=config, session=request.session)
rows = db.select('''SELECT
schicht.id AS id,
schicht.name AS name,
schicht.description AS description,
station.name AS station_name,
schicht.from_day AS from_day,
schicht.from_hour AS from_hour,
schicht.until_day AS until_day,
schicht.until_hour AS until_hour
FROM
person_schicht
LEFT JOIN
schicht on person_schicht.schicht_id = schicht.id
JOIN
station ON schicht.station_id = station.id
WHERE
person_schicht.pers_id=?
ORDER BY
schicht.from_day ASC, schicht.from_hour ASC''', (helferid,))
	# determine for which shifts the deadline to unclaim has already passed
daysSinceStart = (datetime.date.today()-config.start_date).days
curHours = datetime.datetime.time(datetime.datetime.now()).hour
showButton = [res["from_day"]*24 + res["from_hour"] > daysSinceStart*24 + curHours + config.user_shift_unclaim_timeout_hours + 1 for res in rows]
return render_template('helferinfo.xml',
schichten=rows,
helfer=helfer[0],
showButton=showButton,
session=request.session)
@require_login
@expose('/helfer/changepw')
def passwort_aendern(request):
if request.method != 'POST':
return redirect('/helfer')
old_pw = request.form.get('old_pw')
new_first = request.form.get('new_first')
new_second = request.form.get('new_second')
if not check_login_credentials(request.session['username'], old_pw):
error_long = u"Das alte Passwort, das du eingegeben hast, stimmt nicht. Du kannst dein Passwort auch bei einem Admin ändern lassen, frag am besten per Mail bei %s" % config.admin_email
return render_template('error.xml', error_short=u"altes passwort falsch",
error_long=error_long,
session=request.session)
if new_first != new_second:
error_long = u"Die beiden neuen Passwörter sind nicht gleich. Du hast dich sehr wahrscheinlich vertippt. Du kannst dein Passwort auch bei einem Admin ändern lassen, frag am besten per Mail bei %s" % config.admin_email
return render_template('error.xml',
error_short=u"Neue Passwörter sind unterschiedlich",
error_long=error_long,
session=request.session)
crypted = sha256_crypt.encrypt(new_first)
db.update('UPDATE person SET password=? WHERE id=?', (crypted,
request.session['userid']))
return redirect('/redirect/my_page')
@require_login
@expose('/helfer/change_data')
def dinge_aendern(request):
if request.method != 'POST':
return redirect('/helfer')
userid = request.session['userid']
new_email = request.form.get('email')
new_mobile = request.form.get('mobile')
want_shirt = request.form.get('want_participant_shirt') == "on"
old_want_shirt = db.select('SELECT want_participant_shirt FROM person WHERE id=?', (userid,))
if len(old_want_shirt) != 1:
## this should never happen, if the @require_login works as expected
## (i.e. if you ever trigger this assertion, go fix @require_login)
assert False
old_want_shirt= old_want_shirt[0]['want_participant_shirt']
## XXX: this feels redundant, but also sql-injection-exploitable if
## shortened too much..
if config.shirt_stuff_changeable:
db.update('''UPDATE
person
SET
email=?,mobile=?,want_participant_shirt=?
WHERE
id=?''', (new_email, new_mobile, want_shirt, userid))
else:
db.update('''UPDATE
person
SET
email=?,mobile=?
WHERE
id=?''', (new_email, new_mobile, userid))
return redirect('/helfer/%d' % (userid,))
@require_admin #we expose sensitive user information here!
@expose('/helfer.csv')
def alle_helfer_csv(request, helferid=None):
columns = "username, email, mobile, tshirt_size, pullover_size, want_participant_shirt, signed_hygiene, signed_fire, COUNT(person_schicht.pers_id) as shiftCount, min(schicht.from_day * 24 + schicht.from_hour) / 24 AS firstday, min(schicht.from_day * 24 + schicht.from_hour) % 24 AS firsthour "
persons = db.select("SELECT {} FROM person LEFT OUTER JOIN person_schicht ON person_schicht.pers_id = person.id LEFT OUTER JOIN schicht ON schicht.id = person_schicht.schicht_id GROUP BY person.id ORDER BY LOWER(username)".format(columns))
#No need for a template, as this is technical data
csv = ','.join(map(lambda x: x.split()[-1], columns.split(', ')))+" \r\n"
csv += u"\r\n".join(",".join('"'+unicode(column).replace('"', '""')+'"' for column in person) for person in persons)
response = Response(csv)
response.headers['content-type'] = 'text/csv; charset=utf-8'
return response
@expose('/helfer/<int:helferid>.ics')
def helfer_ical(request,helferid):
rows = db.select('''SELECT
schicht.id AS id,
schicht.name AS name,
schicht.description AS description,
station.name AS station_name,
schicht.from_day AS from_day,
schicht.from_hour AS from_hour,
schicht.until_day AS until_day,
schicht.until_hour AS until_hour,
GROUP_CONCAT(s2.username, ", ") as mithelfer
FROM
person_schicht
LEFT JOIN
schicht on person_schicht.schicht_id = schicht.id
JOIN
station ON schicht.station_id = station.id
JOIN
person_schicht as ps ON ps.schicht_id = person_schicht.schicht_id
LEFT JOIN
person as s2 ON ps.pers_id = s2.id AND s2.id != ?
WHERE
person_schicht.pers_id=?
GROUP BY
schicht.id
ORDER BY
schicht.from_day ASC, schicht.from_hour ASC''', (helferid,helferid))
cal = Calendar()
cal.add('prodid', '-//fsi//kiftool//DE')
cal.add('version', '2.0')
for termin in rows:
event = Event()
event.add('summary', termin['name'])
event.add('description', termin['description'] + (" " + termin['mithelfer'] if termin['mithelfer'] else ""))
until_hour = termin['until_hour']
until_min = 0
if until_hour == 24:
until_hour = 23
until_min = 59
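		# Mapping note (added for clarity): shift day numbers appear to be
		# counted from 29 October 2013 (the assumed event start), so
		# from_day 0..2 falls on 29-31 October 2013 and day >= 3 maps to
		# (day - 2) November 2013, which is what the two branches below do.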
if termin['from_day'] < 3:
event.add('dtstart', datetime.datetime(2013,10,29+termin['from_day'],termin['from_hour'],0,0,tzinfo=pytz.timezone('Europe/Berlin')))
else:
event.add('dtstart', datetime.datetime(2013,11,termin['from_day']-2,termin['from_hour'],0,0,tzinfo=pytz.timezone('Europe/Berlin')))
if termin['until_day'] < 3:
event.add('dtend', datetime.datetime(2013,10,29+termin['until_day'],until_hour,until_min,0,tzinfo=pytz.timezone('Europe/Berlin')))
else:
event.add('dtend', datetime.datetime(2013,11,termin['until_day']-2,until_hour,until_min,0,tzinfo=pytz.timezone('Europe/Berlin')))
event.add('dtstamp', datetime.datetime(2013,9,4,0,0,0,tzinfo=pytz.timezone('Europe/Berlin')))
event['uid'] = "2013uid"+str(termin['id'])+"@kif.fsi.informatik.uni-erlangen.de"
event.add('priority', 5)
cal.add_component(event)
response = Response(cal.to_ical())
response.headers['content-type'] = 'text/calendar; charset=utf-8'
return response
## overview of all helpers
@expose('/helfer')
def helfer_overview(request):
helfer = db.select('SELECT id, username FROM person')
schichten = {}
for h in helfer:
schichten[h[1]] = db.select('''SELECT schicht.name AS schichtname FROM
person_schicht
JOIN
schicht ON person_schicht.schicht_id = schicht.id
WHERE
person_schicht.pers_id=?''', (h[0],))
return render_template('helferuebersicht.xml', schichten=schichten,
session=request.session)
| gpl-2.0 | 5,990,155,164,621,906,000 | 34.174603 | 294 | 0.718231 | false |
MyRobotLab/pyrobotlab | home/moz4r/deprecated/Inmoov/InmoovScript_InmoovAI/INMOOV-AI_WeatherMap_Meteo.py | 1 | 2432 | global cur_temperature
global low_temperature
global high_temperature
global todayforecast
cur_temperature=0
low_temperature=0
high_temperature=0
todayforecast=0
def Meteo(Town_Parameter):
try:
if Town_Parameter=="0":
Town_Parameter=Town
print "http://api.openweathermap.org/data/2.5/weather?q=" + Town_Parameter + "&units=" + units + "&APPID=" + WeatherMapMeteoApi
response = urllib2.urlopen("http://api.openweathermap.org/data/2.5/weather?q=" + Town_Parameter + "&units=" + units + "&APPID=" + WeatherMapMeteoApi)
weather = response.read()
w = json.loads(weather)
#CURRENT TEMPERATURE
#print w['main']['temp'] #in kelvin
print weather
print w
cur_temperature = round(float(w['main']['temp']),0)
print ("Current Temp:")
print (round(cur_temperature, 0))
####################################################################
#FORECAST
response = urllib2.urlopen("http://api.openweathermap.org/data/2.5/forecast/daily?q="+Town_Parameter+"&units="+units+"&APPID="+WeatherMapMeteoApi)
weather = response.read()
w = json.loads(weather)
#TODAY'S LOW
low_temperature = round(float(w['list'][0]['temp']['min']),0)
print ("Daily Low: ")
print (round(low_temperature, 0))
#TODAY'S HIGH
high_temperature = round(float(w['list'][0]['temp']['max']),0)
print ("Daily High: ")
print (round(high_temperature, 0))
#rain or clear today?
todayforecast = w['list'][0]['weather'][0]['main']
print ("The weather is: ")
print (todayforecast)
if todayforecast == 'Clear':
todayforecast=2
if todayforecast == 'Rain':
todayforecast=3
if todayforecast == 'Clouds':
todayforecast=1
if todayforecast == 'Snow':
todayforecast=4
print "SYSTEM METEO curtemperature " + str(cur_temperature).replace(".0", "") + " lowtemperature " + str(low_temperature).replace(".0", "") + " hightemperature " + str(high_temperature).replace(".0", "") + " Town " + str(Town_Parameter) + " COMMENTAIRE " + str(todayforecast)
chatBot.getResponse("SYSTEM METEO curtemperature " + str(cur_temperature).replace(".0", "") + " lowtemperature " + str(low_temperature).replace(".0", "") + " hightemperature " + str(high_temperature).replace(".0", "") + " Town " + str(Town_Parameter) + " COMMENTAIRE " + str(todayforecast))
except:
chatBot.getResponse("SYSTEM METEO curtemperature 0 lowtemperature 0 hightemperature 0 Town 0 COMMENTAIRE 0")
print sys.exc_info()[0]
pass
| apache-2.0 | 2,056,180,113,262,557,000 | 35.848485 | 292 | 0.659128 | false |
kyprizel/certificate-transparency | python/ct/client/log_client.py | 1 | 39425 | """RFC 6962 client API."""
import base64
import json
import collections
from ct.client.db import database
from ct.crypto import verify
from ct.proto import client_pb2
import gflags
import httplib
import httplib2
import logging
import random
import urllib
import urlparse
from twisted.internet import defer
from twisted.internet import error
from twisted.internet import protocol
from twisted.internet import reactor as ireactor
from twisted.internet import task
from twisted.internet import threads
from twisted.python import failure
from twisted.web import client
from twisted.web import http
from twisted.web import iweb
from Queue import Queue
from zope.interface import implements
FLAGS = gflags.FLAGS
gflags.DEFINE_integer("entry_fetch_batch_size", 1000, "Maximum number of "
"entries to attempt to fetch in one request.")
gflags.DEFINE_integer("max_fetchers_in_parallel", 100, "Maximum number of "
"concurrent fetches.")
gflags.DEFINE_integer("get_entries_retry_delay", 1, "Number of seconds after "
"which get-entries will be retried if it encountered "
"an error.")
gflags.DEFINE_integer("get_entries_max_retries", 10, "Number of retries after "
"which get-entries simply fails.")
gflags.DEFINE_integer("entries_buffer", 100000, "Size of buffer which stores "
"fetched entries before async log client is able to "
"return them. 100000 entries shouldn't take more "
"than 600 Mb of memory.")
gflags.DEFINE_integer("response_buffer_size_bytes", 50 * 1000 * 1000, "Maximum "
"size of a single response buffer. Should be set such "
"that a get_entries response comfortably fits in the "
"the buffer. A typical log entry is expected to be < "
"10kB.")
gflags.DEFINE_bool("persist_entries", True, "Cache entries on disk.")
class Error(Exception):
pass
class ClientError(Error):
pass
class HTTPError(Error):
"""Connection failed, or returned an error."""
pass
class HTTPConnectionError(HTTPError):
"""Connection failed."""
pass
class HTTPResponseSizeExceededError(HTTPError):
"""HTTP response exceeded maximum permitted size."""
pass
class HTTPClientError(HTTPError):
"""HTTP 4xx."""
pass
class HTTPServerError(HTTPError):
"""HTTP 5xx."""
pass
class InvalidRequestError(Error):
"""Request does not comply with the CT protocol."""
pass
class InvalidResponseError(Error):
"""Response does not comply with the CT protocol."""
pass
###############################################################################
# Common utility methods and constants. #
###############################################################################
_GET_STH_PATH = "ct/v1/get-sth"
_GET_ENTRIES_PATH = "ct/v1/get-entries"
_GET_STH_CONSISTENCY_PATH = "ct/v1/get-sth-consistency"
_GET_PROOF_BY_HASH_PATH = "ct/v1/get-proof-by-hash"
_GET_ROOTS_PATH = "ct/v1/get-roots"
_GET_ENTRY_AND_PROOF_PATH = "ct/v1/get-entry-and-proof"
_ADD_CHAIN = "ct/v1/add-chain"
def _parse_sth(sth_body):
"""Parse a serialized STH JSON response."""
sth_response = client_pb2.SthResponse()
try:
sth = json.loads(sth_body)
sth_response.timestamp = sth["timestamp"]
sth_response.tree_size = sth["tree_size"]
sth_response.sha256_root_hash = base64.b64decode(sth[
"sha256_root_hash"])
sth_response.tree_head_signature = base64.b64decode(sth[
"tree_head_signature"])
# TypeError for base64 decoding, TypeError/ValueError for invalid
# JSON field types, KeyError for missing JSON fields.
except (TypeError, ValueError, KeyError) as e:
raise InvalidResponseError("Invalid STH %s\n%s" % (sth_body, e))
return sth_response
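# For illustration (values are made up): a get-sth response body accepted by
# _parse_sth() has the shape
#   {"timestamp": 1396609800000, "tree_size": 12345,
#    "sha256_root_hash": "<base64>", "tree_head_signature": "<base64>"}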
def _parse_entry(json_entry):
"""Convert a json array element to an EntryResponse."""
entry_response = client_pb2.EntryResponse()
try:
entry_response.leaf_input = base64.b64decode(
json_entry["leaf_input"])
entry_response.extra_data = base64.b64decode(
json_entry["extra_data"])
except (TypeError, ValueError, KeyError) as e:
raise InvalidResponseError("Invalid entry: %s\n%s" % (json_entry, e))
return entry_response
def _parse_entries(entries_body, expected_response_size):
"""Load serialized JSON response.
Args:
entries_body: received entries.
expected_response_size: number of entries requested. Used to validate
the response.
Returns:
a list of client_pb2.EntryResponse entries.
Raises:
InvalidResponseError: response not valid.
"""
try:
response = json.loads(entries_body)
except ValueError as e:
raise InvalidResponseError("Invalid response %s\n%s" %
(entries_body, e))
try:
entries = iter(response["entries"])
except (TypeError, KeyError) as e:
raise InvalidResponseError("Invalid response: expected "
"an array of entries, got %s\n%s)" %
(response, e))
# Logs MAY honor requests where 0 <= "start" < "tree_size" and
# "end" >= "tree_size" by returning a partial response covering only
# the valid entries in the specified range.
# Logs MAY restrict the number of entries that can be retrieved per
# "get-entries" request. If a client requests more than the
# permitted number of entries, the log SHALL return the maximum
# number of entries permissible. (RFC 6962)
#
# Therefore, we cannot assume we get exactly the expected number of
# entries. However if we get none, or get more than expected, then
# we discard the response and raise.
response_size = len(response["entries"])
if not response_size or response_size > expected_response_size:
raise InvalidResponseError("Invalid response: requested %d entries,"
"got %d entries" %
(expected_response_size, response_size))
return [_parse_entry(e) for e in entries]
def _parse_consistency_proof(response, servername):
try:
response = json.loads(response)
consistency = [base64.b64decode(u) for u in response["consistency"]]
except (TypeError, ValueError, KeyError) as e:
raise InvalidResponseError(
"%s returned invalid data: expected a base64-encoded "
"consistency proof, got %s"
"\n%s" % (servername, response, e))
return consistency
# A class that we can mock out to generate fake responses.
class RequestHandler(object):
"""HTTPS requests."""
def __init__(self, connection_timeout=60, ca_bundle=None,
num_retries=None):
self._http = httplib2.Http(
timeout=connection_timeout, ca_certs=ca_bundle)
# Explicitly check for None as num_retries being 0 is valid.
if num_retries is None:
num_retries = FLAGS.get_entries_max_retries
self._num_retries = num_retries
def __repr__(self):
return "%r()" % self.__class__.__name__
def __str__(self):
return "%r()" % self.__class__.__name__
def get_response(self, uri, params=None):
"""Get an HTTP response for a GET request."""
uri_with_params = self._uri_with_params(uri, params)
try:
num_get_attempts = self._num_retries + 1
while num_get_attempts > 0:
try:
return self._build_requests_style_response(
self._http.request(uri_with_params))
except httplib.IncompleteRead as e:
num_get_attempts = num_get_attempts - 1
logging.info("Retrying fetching %s, error %s" % (
uri_with_params, e))
raise HTTPError(
"Received incomplete reply to %s too many times" %
uri_with_params)
except httplib2.HttpLib2Error as e:
raise HTTPError("Connection to %s failed: %s" % (
uri_with_params, e))
def post_response(self, uri, post_data):
try:
return self._build_requests_style_response(
self._http.request(uri, "POST", json.dumps(post_data)))
except httplib2.HttpLib2Error as e:
raise HTTPError("POST to %s failed: %s" % (uri, e))
# Mimic the Response class from the requests API.
Response = collections.namedtuple('Response', ['status_code', 'reason', 'content', 'headers'])
@staticmethod
def check_response_status(code, reason, content='', headers=''):
if code == 200:
return
elif 400 <= code < 500:
raise HTTPClientError("%s (%s) %s" % (reason, content, headers))
elif 500 <= code < 600:
raise HTTPServerError("%s (%s) %s" % (reason, content, headers))
else:
raise HTTPError("%s (%s) %s" % (reason, content, headers))
@staticmethod
def _uri_with_params(uri, params=None):
if not params:
return uri
components = list(urlparse.urlparse(uri))
if params:
# Update the URI query, which is at index 4 of the tuple.
components[4] = urllib.urlencode(params)
return urlparse.urlunparse(components)
@staticmethod
def _build_requests_style_response((resp_hdr, resp_body)):
status_code = int(resp_hdr.pop("status")) if "status" in resp_hdr else 0
reason = resp_hdr["reason"] if "reason" in resp_hdr else ""
return RequestHandler.Response(status_code, reason, resp_body, resp_hdr)
def get_response_body(self, uri, params=None):
response = self.get_response(uri, params=params)
self.check_response_status(response.status_code, response.reason,
response.content, response.headers)
return response.content
def post_response_body(self, uri, post_data=None):
response = self.post_response(uri, post_data=post_data)
self.check_response_status(response.status_code, response.reason,
response.content, response.headers)
return response.content
###############################################################################
# The synchronous log client. #
###############################################################################
class LogClient(object):
"""HTTP client for talking to a CT log."""
"""Create a new log client.
Args:
uri: The CT Log URI to communicate with.
handler: A custom RequestHandler to use. If not specified, a new one
will be created.
connection_timeout: Timeout (in seconds) for all GET and POST requests.
ca_bundle: None or a file path containing a set of CA roots. If None,
httplib2 will attempt to locate a set of CA roots, falling back on its
own bundle if need be. See httplib2 documentation for more information.
"""
def __init__(self, uri, handler=None, connection_timeout=60,
ca_bundle=None):
self._uri = uri
if handler:
self._request_handler = handler
else:
self._request_handler = RequestHandler(connection_timeout, ca_bundle)
def __repr__(self):
return "%r(%r)" % (self.__class__.__name__, self._request_handler)
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self._request_handler.uri)
@property
def servername(self):
return self._uri
def _req_body(self, path, params=None):
return self._request_handler.get_response_body(self._uri + "/" + path,
params=params)
def _post_req_body(self, path, post_data=None):
return self._request_handler.post_response_body(
self._uri + "/" + path, post_data=post_data)
def _parse_sct(self, sct_response):
sct_data = json.loads(sct_response)
try:
sct = client_pb2.SignedCertificateTimestamp()
sct_version = sct_data["sct_version"]
if sct_version != 0:
raise InvalidResponseError(
"Unknown SCT version: %d" % sct_version)
sct.version = client_pb2.V1
sct.id.key_id = base64.b64decode(sct_data["id"])
sct.timestamp = sct_data["timestamp"]
hash_algorithm, sig_algorithm, sig_data = verify.decode_signature(
base64.b64decode(sct_data["signature"]))
sct.signature.hash_algorithm = hash_algorithm
sct.signature.sig_algorithm = sig_algorithm
sct.signature.signature = sig_data
return sct
except KeyError as e:
raise InvalidResponseError("SCT Missing field: %s" % e)
def get_sth(self):
"""Get the current Signed Tree Head.
Returns:
a ct.proto.client_pb2.SthResponse proto.
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed.
For logs that honour HTTP status codes, HTTPClientError (a 4xx)
should never happen.
InvalidResponseError: server response is invalid for the given
request.
"""
sth = self._req_body(_GET_STH_PATH)
return _parse_sth(sth)
def get_entries(self, start, end, batch_size=0):
"""Retrieve log entries.
Args:
start : index of first entry to retrieve.
end : index of last entry to retrieve.
batch_size: max number of entries to fetch in one go.
Yields:
ct.proto.client_pb2.EntryResponse protos.
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed,
or returned an error. HTTPClientError can happen when
[start, end] is not a valid range for this log.
InvalidRequestError: invalid request range (irrespective of log).
InvalidResponseError: server response is invalid for the given
request
Caller is responsible for ensuring that (start, end) is a valid range
(by retrieving an STH first), otherwise a HTTPClientError may occur.
"""
# Catch obvious mistakes here.
if start < 0 or end < 0 or start > end:
raise InvalidRequestError("Invalid range [%d, %d]" % (start, end))
batch_size = batch_size or FLAGS.entry_fetch_batch_size
while start <= end:
# Note that an HTTPError may occur here if the log does not have the
# requested range of entries available. RFC 6962 says:
# "Any errors will be returned as HTTP 4xx or 5xx responses, with
# human-readable error messages."
# There is thus no easy way to distinguish this case from other
# errors.
first = start
last = min(start + batch_size - 1, end)
response = self._req_body(_GET_ENTRIES_PATH,
params={"start": first, "end": last})
entries = _parse_entries(response, last - first + 1)
for entry in entries:
yield entry
# If we got less entries than requested, then we don't know whether
# the log imposed a batch limit or ran out of entries, so we keep
# trying until we get all entries, or an error response.
start += len(entries)
def get_sth_consistency(self, old_size, new_size):
"""Retrieve a consistency proof.
Args:
old_size : size of older tree.
new_size : size of newer tree.
Returns:
list of raw hashes (bytes) forming the consistency proof
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed,
or returned an error. HTTPClientError can happen when
(old_size, new_size) are not valid for this log (e.g. greater
than the size of the log).
InvalidRequestError: invalid request size (irrespective of log).
InvalidResponseError: server response is invalid for the given
request
Caller is responsible for ensuring that (old_size, new_size) are valid
(by retrieving an STH first), otherwise a HTTPClientError may occur.
"""
if old_size > new_size:
raise InvalidRequestError(
"old > new: %s >= %s" % (old_size, new_size))
if old_size < 0 or new_size < 0:
raise InvalidRequestError(
"both sizes must be >= 0: %s, %s" % (old_size, new_size))
# don't need to contact remote server for trivial proofs:
# - empty tree is consistent with everything
# - everything is consistent with itself
if old_size == 0 or old_size == new_size:
return []
response = self._req_body(_GET_STH_CONSISTENCY_PATH,
params={"first": old_size,
"second": new_size})
return _parse_consistency_proof(response, self.servername)
def get_proof_by_hash(self, leaf_hash, tree_size):
"""Retrieve an audit proof by leaf hash.
Args:
leaf_hash: hash of the leaf input (as raw binary string).
tree_size: size of the tree on which to base the proof.
Returns:
a client_pb2.ProofByHashResponse containing the leaf index
and the Merkle tree audit path nodes (as binary strings).
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed,
HTTPClientError can happen when leaf_hash is not present in the
log tree of the given size.
InvalidRequestError: invalid request (irrespective of log).
InvalidResponseError: server response is invalid for the given
request.
"""
if tree_size <= 0:
raise InvalidRequestError("Tree size must be positive (got %d)" %
tree_size)
leaf_hash = base64.b64encode(leaf_hash)
response = self._req_body(_GET_PROOF_BY_HASH_PATH,
params={"hash": leaf_hash,
"tree_size": tree_size})
response = json.loads(response)
proof_response = client_pb2.ProofByHashResponse()
try:
proof_response.leaf_index = response["leaf_index"]
proof_response.audit_path.extend(
[base64.b64decode(u) for u in response["audit_path"]])
except (TypeError, ValueError, KeyError) as e:
raise InvalidResponseError(
"%s returned invalid data: expected a base64-encoded "
"audit proof, got %s"
"\n%s" % (self.servername, response, e))
return proof_response
def get_entry_and_proof(self, leaf_index, tree_size):
"""Retrieve an entry and its audit proof by index.
Args:
leaf_index: index of the entry.
tree_size: size of the tree on which to base the proof.
Returns:
a client_pb2.EntryAndProofResponse containing the entry
and the Merkle tree audit path nodes (as binary strings).
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed,
HTTPClientError can happen when tree_size is not a valid size
for this log.
InvalidRequestError: invalid request (irrespective of log).
InvalidResponseError: server response is invalid for the given
request.
"""
if tree_size <= 0:
raise InvalidRequestError("Tree size must be positive (got %d)" %
tree_size)
if leaf_index < 0 or leaf_index >= tree_size:
raise InvalidRequestError("Leaf index must be smaller than tree "
"size (got index %d vs size %d" %
(leaf_index, tree_size))
response = self._req_body(_GET_ENTRY_AND_PROOF_PATH,
params={"leaf_index": leaf_index,
"tree_size": tree_size})
response = json.loads(response)
entry_response = client_pb2.EntryAndProofResponse()
try:
entry_response.entry.CopyFrom(_parse_entry(response))
entry_response.audit_path.extend(
[base64.b64decode(u) for u in response["audit_path"]])
except (TypeError, ValueError, KeyError) as e:
raise InvalidResponseError(
"%s returned invalid data: expected an entry and proof, got %s"
"\n%s" % (self.servername, response, e))
return entry_response
def get_roots(self):
"""Retrieve currently accepted root certificates.
Returns:
a list of certificates (as raw binary strings).
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed,
or returned an error. For logs that honour HTTP status codes,
HTTPClientError (a 4xx) should never happen.
InvalidResponseError: server response is invalid for the given
request.
"""
response = self._req_body(_GET_ROOTS_PATH)
response = json.loads(response)
try:
            return [base64.b64decode(u) for u in response["certificates"]]
except (TypeError, ValueError, KeyError) as e:
raise InvalidResponseError(
"%s returned invalid data: expected a list od base64-encoded "
"certificates, got %s\n%s" % (self.servername, response, e))
def add_chain(self, certs_list):
"""Adds the given chain of certificates.
Args:
certs_list: A list of DER-encoded certificates to add.
Returns:
The SCT for the certificate.
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed.
For logs that honour HTTP status codes, HTTPClientError (a 4xx)
should never happen.
InvalidResponseError: server response is invalid for the given
request.
"""
sct_data = self._post_req_body(
_ADD_CHAIN,
{'chain': [base64.b64encode(certificate) for certificate in certs_list]})
return self._parse_sct(sct_data)
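# Illustrative usage sketch (added for clarity, not part of the original
# module). The log URI is an assumption; the helper is defined but never
# called, so importing this module is unaffected.
def _example_sync_client_usage(uri="https://ct.example.com"):
    """Fetch the current STH and the first few entries from a log (sketch)."""
    client = LogClient(uri)
    sth = client.get_sth()
    # Read at most the first 10 entries (fewer if the tree is smaller).
    last_index = min(9, sth.tree_size - 1)
    entries = list(client.get_entries(0, last_index))
    return sth, entries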
###############################################################################
# The asynchronous twisted log client. #
###############################################################################
class ResponseBodyHandler(protocol.Protocol):
"""Response handler for HTTP requests."""
def __init__(self, finished):
"""Initialize the one-off response handler.
Args:
finished: a deferred that will be fired with the body when the
complete response has been received; or with an error when the
connection is lost.
"""
self._finished = finished
def connectionMade(self):
self._buffer = []
self._len = 0
self._overflow = False
def dataReceived(self, data):
self._len += len(data)
if self._len > FLAGS.response_buffer_size_bytes:
# Note this flag has to be set *before* calling loseConnection()
# to ensure connectionLost gets called with the flag set.
self._overflow = True
self.transport.loseConnection()
else:
self._buffer.append(data)
def connectionLost(self, reason):
if self._overflow:
self._finished.errback(HTTPResponseSizeExceededError(
"Connection aborted: response size exceeded %d bytes" %
FLAGS.response_buffer_size_bytes))
elif not reason.check(*(error.ConnectionDone, client.ResponseDone,
http.PotentialDataLoss)):
self._finished.errback(HTTPConnectionError(
"Connection lost (received %d bytes)" % self._len))
else:
body = "".join(self._buffer)
self._finished.callback(body)
class AsyncRequestHandler(object):
"""A helper for asynchronous response body delivery."""
def __init__(self, agent):
self._agent = agent
@staticmethod
def _response_cb(response):
try:
RequestHandler.check_response_status(response.code, response.phrase,
list(response.headers.getAllRawHeaders()))
except HTTPError as e:
return failure.Failure(e)
finished = defer.Deferred()
response.deliverBody(ResponseBodyHandler(finished))
return finished
@staticmethod
def _make_request(path, params):
if not params:
return path
return path + "?" + "&".join(["%s=%s" % (key, value)
for key, value in params.iteritems()])
def get(self, path, params=None):
d = self._agent.request("GET", self._make_request(path, params))
d.addCallback(self._response_cb)
return d
class EntryProducer(object):
"""A push producer for log entries."""
implements(iweb.IBodyProducer)
def __init__(self, handler, reactor, uri, start, end,
batch_size, entries_db=None):
self._handler = handler
self._reactor = reactor
self._uri = uri
self._entries_db = entries_db
self._consumer = None
assert 0 <= start <= end
self._start = start
self._end = end
self._current = self._start
self._batch_size = batch_size
self._batches = Queue()
self._currently_fetching = 0
self._currently_stored = 0
self._last_fetching = self._current
self._max_currently_fetching = (FLAGS.max_fetchers_in_parallel *
self._batch_size)
# Required attribute of the interface.
self.length = iweb.UNKNOWN_LENGTH
self.min_delay = FLAGS.get_entries_retry_delay
@property
def finished(self):
return self._current > self._end
def __fail(self, failure):
if not self._stopped:
self.stopProducing()
self._done.errback(failure)
@staticmethod
def _calculate_retry_delay(retries):
"""Calculates delay based on number of retries which already happened.
        Randomness is added so that we do not hit the server with lots of
        requests at exactly the same time, and 1.4 is a nice constant for
        exponential back-off."""
return ((0.4 + random.uniform(0.3, 0.6)) * FLAGS.get_entries_retry_delay
* 1.4**retries)
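    # Worked example (illustrative): with the default get_entries_retry_delay
    # of 1 second, retry r is delayed by a value roughly uniform in
    # [0.7, 1.0) * 1.4**r seconds, i.e. about 0.7-1.0s at first, then
    # 1.0-1.4s, 1.4-2.0s and so on.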
def _response_eb(self, failure, first, last, retries):
"""Error back for HTTP errors"""
if not self._paused:
# if it's not last retry and failure wasn't our fault we retry
if (retries < FLAGS.get_entries_max_retries and
not failure.check(HTTPClientError)):
logging.info("Retrying get-entries for range <%d, %d> retry: %d"
% (first, last, retries))
d = task.deferLater(self._reactor,
self._calculate_retry_delay(retries),
self._fetch_parsed_entries,
first, last)
d.addErrback(self._response_eb, first, last, retries + 1)
return d
else:
self.__fail(failure)
def _fetch_eb(self, failure):
"""Error back for errors after getting result of a request
(InvalidResponse)"""
self.__fail(failure)
def _write_pending(self):
d = defer.Deferred()
d.callback(None)
if self._pending:
self._current += len(self._pending)
self._currently_stored -= len(self._pending)
d = self._consumer.consume(self._pending)
self._pending = None
return d
def _batch_completed(self, result):
self._currently_fetching -= len(result)
self._currently_stored += len(result)
return result
def _store_batch(self, entry_batch, start_index):
assert self._entries_db
d = threads.deferToThread(self._entries_db.store_entries,
enumerate(entry_batch, start_index))
d.addCallback(lambda _: entry_batch)
return d
def _get_entries_from_db(self, first, last):
if FLAGS.persist_entries and self._entries_db:
d = threads.deferToThread(self._entries_db.scan_entries, first, last)
d.addCallbacks(lambda entries: list(entries))
d.addErrback(lambda fail: fail.trap(database.KeyError) and None)
return d
else:
d = defer.Deferred()
d.callback(None)
return d
def _fetch_parsed_entries(self, first, last):
# first check in database
d = self._get_entries_from_db(first, last)
d.addCallback(self._sub_fetch_parsed_entries, first, last)
return d
def _sub_fetch_parsed_entries(self, entries, first, last):
        # It is not a good idea to hit the server with many requests at exactly
        # the same time, so requests are sent after a slight delay.
if not entries:
request = task.deferLater(self._reactor,
self._calculate_retry_delay(0),
self._handler.get,
self._uri + "/" + _GET_ENTRIES_PATH,
params={"start": str(first),
"end": str(last)})
request.addCallback(_parse_entries, last - first + 1)
if self._entries_db and FLAGS.persist_entries:
request.addCallback(self._store_batch, first)
entries = request
else:
deferred_entries = defer.Deferred()
deferred_entries.callback(entries)
entries = deferred_entries
return entries
def _create_next_request(self, first, last, entries, retries):
d = self._fetch_parsed_entries(first, last)
d.addErrback(self._response_eb, first, last, retries)
d.addCallback(lambda result: (entries + result, len(result)))
d.addCallback(self._fetch, first, last, retries)
return d
def _fetch(self, result, first, last, retries):
entries, last_fetched_entries_count = result
next_range_start = first + last_fetched_entries_count
if next_range_start > last:
return entries
return self._create_next_request(next_range_start, last,
entries, retries)
def _create_fetch_deferred(self, first, last, retries=0):
d = defer.Deferred()
d.addCallback(self._fetch, first, last, retries)
d.addCallback(self._batch_completed)
d.addErrback(self._fetch_eb)
d.callback(([], 0))
return d
@defer.deferredGenerator
def produce(self):
"""Produce entries."""
while not self._paused:
wfd = defer.waitForDeferred(self._write_pending())
yield wfd
wfd.getResult()
if self.finished:
self.finishProducing()
return
first = self._last_fetching
while (self._currently_fetching <= self._max_currently_fetching and
self._last_fetching <= self._end and
self._currently_stored <= FLAGS.entries_buffer):
last = min(self._last_fetching + self._batch_size - 1, self._end,
self._last_fetching + self._max_currently_fetching
- self._currently_fetching + 1)
self._batches.put(self._create_fetch_deferred(first, last))
self._currently_fetching += last - first + 1
first = last + 1
self._last_fetching = first
wfd = defer.waitForDeferred(self._batches.get())
# Pause here until the body of the response is available.
yield wfd
# The producer may have been paused while waiting for the response,
# or errored out upon receiving it: do not write the entries out
# until after the next self._paused check.
self._pending = wfd.getResult()
def startProducing(self, consumer):
"""Start producing entries.
The producer writes EntryResponse protos to the consumer in batches,
until all entries have been received, or an error occurs.
Args:
consumer: the consumer to write to.
Returns:
a deferred that fires when no more entries will be written.
Upon success, this deferred fires number of produced entries or
None if production wasn't successful. Upon failure, this deferred
fires with the appropriate HTTPError.
Raises:
RuntimeError: consumer already registered.
"""
if self._consumer:
raise RuntimeError("Producer already has a consumer registered")
self._consumer = consumer
self._stopped = False
self._paused = True
self._pending = None
self._done = defer.Deferred()
# An IBodyProducer should start producing immediately, without waiting
# for an explicit resumeProducing() call.
task.deferLater(self._reactor, 0, self.resumeProducing)
return self._done
def pauseProducing(self):
self._paused = True
def resumeProducing(self):
if self._paused and not self._stopped:
self._paused = False
d = self.produce()
d.addErrback(self.finishProducing)
def stopProducing(self):
self._paused = True
self._stopped = True
def finishProducing(self, failure=None):
self.stopProducing()
if not failure:
self._done.callback(self._end - self._start + 1)
else:
self._done.errback(failure)
class AsyncLogClient(object):
"""A twisted log client."""
def __init__(self, agent, uri, entries_db=None, reactor=ireactor):
"""Initialize the client.
If entries_db is specified and flag persist_entries is true, get_entries
will return stored entries.
Args:
agent: the agent to use.
uri: the uri of the log.
entries_db: object that conforms TempDB API
reactor: the reactor to use. Default is twisted.internet.reactor.
"""
self._handler = AsyncRequestHandler(agent)
#twisted expects bytes, so if uri is unicode we have to change encoding
self._uri = uri.encode('ascii')
self._reactor = reactor
self._entries_db = entries_db
@property
def servername(self):
return self._uri
def get_sth(self):
"""Get the current Signed Tree Head.
Returns:
a Deferred that fires with a ct.proto.client_pb2.SthResponse proto.
Raises:
HTTPError, HTTPConnectionError, HTTPClientError,
HTTPResponseSizeExceededError, HTTPServerError: connection failed.
For logs that honour HTTP status codes, HTTPClientError (a 4xx)
should never happen.
InvalidResponseError: server response is invalid for the given
request.
"""
deferred_result = self._handler.get(self._uri + "/" + _GET_STH_PATH)
deferred_result.addCallback(_parse_sth)
return deferred_result
def get_entries(self, start, end, batch_size=0):
"""Retrieve log entries.
Args:
start: index of first entry to retrieve.
end: index of last entry to retrieve.
batch_size: max number of entries to fetch in one go.
Returns:
an EntryProducer for the given range.
Raises:
InvalidRequestError: invalid request range (irrespective of log).
Caller is responsible for ensuring that (start, end) is a valid range
(by retrieving an STH first), otherwise a HTTPClientError may occur
during production.
"""
# Catch obvious mistakes here.
if start < 0 or end < 0 or start > end:
raise InvalidRequestError("Invalid range [%d, %d]" % (start, end))
batch_size = batch_size or FLAGS.entry_fetch_batch_size
return EntryProducer(self._handler, self._reactor, self._uri,
start, end, batch_size, self._entries_db)
def get_sth_consistency(self, old_size, new_size):
"""Retrieve a consistency proof.
Args:
old_size : size of older tree.
new_size : size of newer tree.
Returns:
a Deferred that fires with list of raw hashes (bytes) forming the
consistency proof
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed,
or returned an error. HTTPClientError can happen when
(old_size, new_size) are not valid for this log (e.g. greater
than the size of the log).
InvalidRequestError: invalid request size (irrespective of log).
InvalidResponseError: server response is invalid for the given
request
Caller is responsible for ensuring that (old_size, new_size) are valid
(by retrieving an STH first), otherwise a HTTPClientError may occur.
"""
if old_size > new_size:
raise InvalidRequestError(
"old > new: %s >= %s" % (old_size, new_size))
if old_size < 0 or new_size < 0:
raise InvalidRequestError(
"both sizes must be >= 0: %s, %s" % (old_size, new_size))
# don't need to contact remote server for trivial proofs:
# - empty tree is consistent with everything
# - everything is consistent with itself
if old_size == 0 or old_size == new_size:
d = defer.Deferred()
d.callback([])
return d
deferred_response = self._handler.get(self._uri + "/" +
_GET_STH_CONSISTENCY_PATH,
params={"first": old_size,
"second": new_size})
deferred_response.addCallback(_parse_consistency_proof, self.servername)
return deferred_response
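# Illustrative usage sketch (not from the original module): one plausible way
# to drive AsyncLogClient from a twisted reactor. The log URI below is a
# made-up example value.
def _example_fetch_sth():
    from twisted.internet import reactor
    from twisted.web.client import Agent
    client = AsyncLogClient(Agent(reactor), "https://ct.example.net/ct/v1")
    def _finish(result):
        # Fires with an SthResponse proto on success, or a failure otherwise.
        print(result)
        reactor.stop()
    client.get_sth().addCallbacks(_finish, _finish)
    reactor.run()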
| apache-2.0 | -5,996,517,393,769,708,000 | 38.189861 | 98 | 0.576791 | false |
alexaltair/calico | calico/felix/fsocket.py | 1 | 8141 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Metaswitch Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
felix.fsocket
~~~~~~~~~~~~~
Functions and classes for managing ZeroMQ sockets.
"""
import json
import logging
import time
import zmq
from calico.felix import futils
log = logging.getLogger(__name__)
class Socket(object):
"""
Socket is an encapsulation of a 0MQ socket wrapping the messaging logic.
It handles connecting and signalling errors, and maintains state about the
message flows.
"""
# Socket types
TYPE_EP_REQ = "EP REQ"
TYPE_EP_REP = "EP REP"
TYPE_ACL_REQ = "ACL REQ"
TYPE_ACL_SUB = "ACL SUB"
ALL_TYPES = set((TYPE_EP_REQ, TYPE_EP_REP, TYPE_ACL_REQ, TYPE_ACL_SUB))
REQUEST_TYPES = set((TYPE_EP_REQ, TYPE_ACL_REQ))
ACL_TYPES = set((TYPE_ACL_REQ, TYPE_ACL_SUB))
EP_TYPES = set((TYPE_EP_REQ, TYPE_EP_REP))
PORT = {TYPE_EP_REQ: 9901,
TYPE_EP_REP: 9902,
TYPE_ACL_REQ: 9905,
TYPE_ACL_SUB: 9906}
ZTYPE = {TYPE_EP_REQ: zmq.REQ,
TYPE_EP_REP: zmq.REP,
TYPE_ACL_REQ: zmq.REQ,
TYPE_ACL_SUB: zmq.SUB}
def __init__(self, type, config):
self.config = config
self.type = type
self.remote_addr = None
self.port = Socket.PORT[type]
self._zmq = None
self.last_activity = None
self.request_outstanding = False
if type in Socket.EP_TYPES:
self.remote_addr = self.config.PLUGIN_ADDR
else:
self.remote_addr = self.config.ACL_ADDR
def close(self):
"""
Close this connection cleanly.
"""
if self._zmq is not None:
self._zmq.close()
self._zmq = None
def communicate(self, hostname, context):
"""
Create and connect / bind a socket
"""
log.info(
"Creating socket to entity %s:%d", self.remote_addr, self.port
)
self._zmq = context.socket(Socket.ZTYPE[self.type])
if self.type == Socket.TYPE_EP_REP:
self._zmq.bind("tcp://%s:%s" % (self.config.LOCAL_ADDR, self.port))
else:
self._zmq.connect("tcp://%s:%s" % (self.remote_addr, self.port))
if self.type == Socket.TYPE_ACL_SUB:
self._zmq.setsockopt(zmq.IDENTITY, hostname)
self._zmq.setsockopt(zmq.SUBSCRIBE, 'aclheartbeat')
# The socket connection event is always the time of last activity.
self.last_activity = futils.time_ms()
# We do not have a request outstanding.
self.request_outstanding = False
def send(self, msg):
"""
Send a specified message on a socket.
"""
log.info("Sent %s on socket %s" % (msg.descr, self.type))
self.last_activity = futils.time_ms()
#*********************************************************************#
#* We never expect any type of socket that we use to block since we *#
#* use only REQ or REP sockets - so if we get blocking then we *#
#* consider that something is wrong, and let the exception take down *#
#* Felix. *#
#*********************************************************************#
try:
self._zmq.send(msg.zmq_msg, zmq.NOBLOCK)
if self.type in Socket.REQUEST_TYPES:
self.request_outstanding = True
except:
log.exception("Socket %s blocked on send", self.type)
raise
def receive(self):
"""
Receive a message on this socket. For subscriptions, this will return
a list of bytes.
"""
log.debug("Received something on %s", self.type)
#*********************************************************************#
#* We never expect any type of socket that we use to block since we *#
#* just polled to check - so if we get blocking then we consider *#
#* that something is wrong, and let the exception take down Felix. *#
#*********************************************************************#
try:
if self.type != Socket.TYPE_ACL_SUB:
data = self._zmq.recv(zmq.NOBLOCK)
uuid = None
else:
uuid, data = self._zmq.recv_multipart(zmq.NOBLOCK)
except:
log.exception("Socket %s blocked on receive", self.type)
raise
message = Message.parse_message(data, uuid)
# Log that we received the message.
log.info("Received %s on socket %s" % (message.descr, self.type))
# If this is a response, we're no longer waiting for one.
if self.type in Socket.REQUEST_TYPES:
self.request_outstanding = False
self.last_activity = futils.time_ms()
# A special case: heartbeat messages on the subscription interface are
# swallowed; the application code has no use for them.
if (self.type == Socket.TYPE_ACL_SUB and
message.type == Message.TYPE_HEARTBEAT):
return None
return message
def timed_out(self):
"""
Returns True if the socket has been inactive for at least the timeout;
all sockets must have heartbeats on them.
"""
return ((futils.time_ms() - self.last_activity) >
self.config.CONN_TIMEOUT_MS)
def keepalive_due(self):
"""
Returns True if we are due to send a keepalive on the socket.
The caller is responsible for deciding which sockets need keepalives.
"""
return ((futils.time_ms() - self.last_activity) >
self.config.CONN_KEEPALIVE_MS)
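# Illustrative sketch (not from the original module): the create/connect/
# send/receive sequence a request socket goes through. 'config' is assumed to
# expose PLUGIN_ADDR/ACL_ADDR/LOCAL_ADDR as above, and 'context' is an
# existing zmq.Context.
def _example_request_roundtrip(config, hostname, context):
    sock = Socket(Socket.TYPE_EP_REQ, config)
    sock.communicate(hostname, context)
    sock.send(Message(Message.TYPE_HEARTBEAT, {}))
    return sock.receive()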
class Message(object):
"""This represents a message either sent or received by Felix."""
TYPE_RESYNC = "RESYNCSTATE"
TYPE_EP_CR = "ENDPOINTCREATED"
TYPE_EP_UP = "ENDPOINTUPDATED"
TYPE_EP_RM = "ENDPOINTDESTROYED"
TYPE_GET_ACL = "GETACLSTATE"
TYPE_ACL_UPD = "ACLUPDATE"
TYPE_HEARTBEAT = "HEARTBEAT"
def __init__(self, type, fields, endpoint_id=None):
#: The type of the message.
self.type = type
#: The description of the message, used for logging only.
if type == Message.TYPE_RESYNC and 'resync_id' in fields:
self.descr = "%s(%s)" % (type, fields['resync_id'])
elif endpoint_id is not None:
self.descr = "%s(%s)" % (type, endpoint_id)
elif 'endpoint_id' in fields:
self.descr = "%s(%s)" % (type, fields['endpoint_id'])
elif type in (Message.TYPE_EP_CR,
Message.TYPE_EP_UP,
Message.TYPE_EP_RM):
self.descr = "%s response" % (type)
else:
self.descr = type
#: A dictionary containing the other dynamic fields on the message.
self.fields = fields
# The endpoint ID for which this message is valid. Only used when
# type is TYPE_ACL_UPD.
self.endpoint_id = endpoint_id
@property
def zmq_msg(self):
"""
The serialized form of the message, suitable for sending on the wire.
"""
data = self.fields.copy()
data['type'] = self.type
return json.dumps(data)
@classmethod
def parse_message(cls, text, endpoint_id=None):
"""Parse a received message."""
data = json.loads(text)
type = data.pop('type')
msg = cls(type, data, endpoint_id)
return msg
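# Illustrative sketch (not from the original module): the JSON round-trip that
# Socket.send()/receive() rely on; the endpoint id is a made-up value.
def _example_message_roundtrip():
    request = Message(Message.TYPE_GET_ACL, {"endpoint_id": "endpoint-1234"})
    wire_form = request.zmq_msg              # JSON string with 'type' folded in
    return Message.parse_message(wire_form)  # parsed back into a Message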
| apache-2.0 | -2,236,228,492,208,511,500 | 33.205882 | 79 | 0.557548 | false |
sserrot/champion_relationships | venv/Lib/site-packages/IPython/extensions/sympyprinting.py | 1 | 1075 | """
**DEPRECATED**
A print function that pretty prints sympy Basic objects.
:moduleauthor: Brian Granger
Usage
=====
Once the extension is loaded, Sympy Basic objects are automatically
pretty-printed.
As of SymPy 0.7.2, maintenance of this extension has moved to SymPy under
sympy.interactive.ipythonprinting, any modifications to account for changes to
SymPy should be submitted to SymPy rather than changed here. This module is
maintained here for backwards compatibility with old SymPy versions.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import warnings
def load_ipython_extension(ip):
warnings.warn("The sympyprinting extension has moved to `sympy`, "
"use `from sympy import init_printing; init_printing()`")
| mit | -2,789,169,341,588,938,000 | 32.59375 | 78 | 0.541395 | false |
rhelmer/socorro-lib | socorro/external/postgresql/crontabber_state.py | 1 | 2109 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import logging
from socorro.external.postgresql.base import PostgreSQLBase
from socorro.lib import datetimeutil
logger = logging.getLogger("webapi")
class CrontabberState(PostgreSQLBase):
"""Implement the /crontabber_state service with PostgreSQL. """
def get(self, **kwargs):
"""Return the current state of all Crontabber jobs"""
sql = """
/* socorro.external.postgresql.crontabber_state.CrontabberState.get */
SELECT
app_name,
next_run,
first_run,
last_run,
last_success,
error_count,
depends_on,
last_error,
ongoing
FROM crontabber
ORDER BY app_name
"""
error_message = (
"Failed to retrieve crontabber state data from PostgreSQL"
)
results = self.query(sql, error_message=error_message)
state = {}
for row in results:
app_name = row[0]
state[app_name] = dict(zip((
'next_run',
'first_run',
'last_run',
'last_success',
'error_count',
'depends_on',
'last_error',
'ongoing'
), row[1:]))
possible_datetimes = (
'next_run',
'first_run',
'last_run',
'last_success',
'ongoing'
)
for key in possible_datetimes:
value = state[app_name][key]
if value is None:
continue
state[app_name][key] = datetimeutil.date_to_string(value)
state[app_name]['last_error'] = json.loads(
state[app_name]['last_error']
)
return {"state": state}
| mpl-2.0 | 177,145,684,015,338,780 | 29.565217 | 78 | 0.497392 | false |
ralphhughes/TempLogger | DHT22.py | 1 | 7329 | #!/usr/bin/env python
# 2014-07-11 DHT22.py
import time
import atexit
import sys
import pigpio
class sensor:
"""
A class to read relative humidity and temperature from the
DHT22 sensor. The sensor is also known as the AM2302.
The sensor can be powered from the Pi 3V3 or the Pi 5V rail.
Powering from the 3V3 rail is simpler and safer. You may need
to power from 5V if the sensor is connected via a long cable.
For 3V3 operation connect pin 1 to 3V3 and pin 4 to ground.
Connect pin 2 to a gpio.
For 5V operation connect pin 1 to 5V and pin 4 to ground.
The following pin 2 connection works for me. Use at YOUR OWN RISK.
5V--5K_resistor--+--10K_resistor--Ground
|
DHT22 pin 2 -----+
|
gpio ------------+
"""
def __init__(self, pi, gpio, LED=None, power=None):
"""
Instantiate with the Pi and gpio to which the DHT22 output
pin is connected.
Optionally a LED may be specified. This will be blinked for
each successful reading.
Optionally a gpio used to power the sensor may be specified.
This gpio will be set high to power the sensor. If the sensor
locks it will be power cycled to restart the readings.
Taking readings more often than about once every two seconds will
eventually cause the DHT22 to hang. A 3 second interval seems OK.
"""
self.pi = pi
self.gpio = gpio
self.LED = LED
self.power = power
if power is not None:
pi.write(power, 1) # Switch sensor on.
time.sleep(2)
self.powered = True
self.cb = None
atexit.register(self.cancel)
self.bad_CS = 0 # Bad checksum count.
self.bad_SM = 0 # Short message count.
self.bad_MM = 0 # Missing message count.
self.bad_SR = 0 # Sensor reset count.
# Power cycle if timeout > MAX_TIMEOUTS.
self.no_response = 0
self.MAX_NO_RESPONSE = 2
self.rhum = -999
self.temp = -999
self.tov = None
self.high_tick = 0
self.bit = 40
pi.set_pull_up_down(gpio, pigpio.PUD_OFF)
pi.set_watchdog(gpio, 0) # Kill any watchdogs.
self.cb = pi.callback(gpio, pigpio.EITHER_EDGE, self._cb)
def _cb(self, gpio, level, tick):
"""
Accumulate the 40 data bits. Format into 5 bytes, humidity high,
humidity low, temperature high, temperature low, checksum.
"""
diff = pigpio.tickDiff(self.high_tick, tick)
if level == 0:
# Edge length determines if bit is 1 or 0.
if diff >= 50:
val = 1
if diff >= 200: # Bad bit?
self.CS = 256 # Force bad checksum.
else:
val = 0
if self.bit >= 40: # Message complete.
self.bit = 40
elif self.bit >= 32: # In checksum byte.
self.CS = (self.CS<<1) + val
if self.bit == 39:
# 40th bit received.
self.pi.set_watchdog(self.gpio, 0)
self.no_response = 0
total = self.hH + self.hL + self.tH + self.tL
if (total & 255) == self.CS: # Is checksum ok?
self.rhum = ((self.hH<<8) + self.hL) * 0.1
if self.tH & 128: # Negative temperature.
mult = -0.1
self.tH = self.tH & 127
else:
mult = 0.1
self.temp = ((self.tH<<8) + self.tL) * mult
self.tov = time.time()
if self.LED is not None:
self.pi.write(self.LED, 0)
else:
self.bad_CS += 1
elif self.bit >=24: # in temp low byte
self.tL = (self.tL<<1) + val
elif self.bit >=16: # in temp high byte
self.tH = (self.tH<<1) + val
elif self.bit >= 8: # in humidity low byte
self.hL = (self.hL<<1) + val
elif self.bit >= 0: # in humidity high byte
self.hH = (self.hH<<1) + val
else: # header bits
pass
self.bit += 1
elif level == 1:
self.high_tick = tick
if diff > 250000:
self.bit = -2
self.hH = 0
self.hL = 0
self.tH = 0
self.tL = 0
self.CS = 0
else: # level == pigpio.TIMEOUT:
self.pi.set_watchdog(self.gpio, 0)
if self.bit < 8: # Too few data bits received.
self.bad_MM += 1 # Bump missing message count.
self.no_response += 1
if self.no_response > self.MAX_NO_RESPONSE:
self.no_response = 0
self.bad_SR += 1 # Bump sensor reset count.
if self.power is not None:
self.powered = False
self.pi.write(self.power, 0)
time.sleep(2)
self.pi.write(self.power, 1)
time.sleep(2)
self.powered = True
         elif self.bit < 39: # Short message received.
self.bad_SM += 1 # Bump short message count.
self.no_response = 0
else: # Full message received.
self.no_response = 0
def temperature(self):
"""Return current temperature."""
return self.temp
def humidity(self):
"""Return current relative humidity."""
return self.rhum
def staleness(self):
"""Return time since measurement made."""
if self.tov is not None:
return time.time() - self.tov
else:
return -999
def bad_checksum(self):
"""Return count of messages received with bad checksums."""
return self.bad_CS
def short_message(self):
"""Return count of short messages."""
return self.bad_SM
def missing_message(self):
"""Return count of missing messages."""
return self.bad_MM
def sensor_resets(self):
"""Return count of power cycles because of sensor hangs."""
return self.bad_SR
def trigger(self):
"""Trigger a new relative humidity and temperature reading."""
if self.powered:
if self.LED is not None:
self.pi.write(self.LED, 1)
self.pi.write(self.gpio, pigpio.LOW)
time.sleep(0.017) # 17 ms
self.pi.set_mode(self.gpio, pigpio.INPUT)
self.pi.set_watchdog(self.gpio, 200)
def cancel(self):
"""Cancel the DHT22 sensor."""
self.pi.set_watchdog(self.gpio, 0)
if self.cb != None:
self.cb.cancel()
self.cb = None
if __name__ == "__main__":
import time
import pigpio
import DHT22
# Intervals of about 2 seconds or less will eventually hang the DHT22.
INTERVAL=3
pi = pigpio.pi()
s = DHT22.sensor(pi, int(sys.argv[1]) ) # Pass the gpio pin from command line
next_reading = time.time()
s.trigger()
time.sleep(0.2)
print("Humidity={}% Temp={}* {:3.2f} {} {} {} {}".format(
s.humidity(), s.temperature(), s.staleness(),
s.bad_checksum(), s.short_message(), s.missing_message(),
s.sensor_resets()))
next_reading += INTERVAL
time.sleep(next_reading-time.time()) # Overall INTERVAL second polling.
s.cancel()
# pi.stop()
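# Illustrative sketch (not part of the original script): continuous polling at
# the 3 second minimum interval noted above, following the single reading done
# in the __main__ block.
def _example_poll_forever(s, interval=3):
    next_reading = time.time()
    while True:
        s.trigger()
        time.sleep(0.2)
        print("Humidity={}% Temp={}*".format(s.humidity(), s.temperature()))
        next_reading += interval
        time.sleep(max(0, next_reading - time.time()))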
| mit | -9,112,710,096,243,817,000 | 25.363309 | 80 | 0.541957 | false |
rzarzynski/tempest | tempest/api/compute/keypairs/test_keypairs_negative.py | 1 | 4517 | # Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest import test
class KeyPairsNegativeTestJSON(base.BaseV2ComputeTest):
@classmethod
def setup_clients(cls):
super(KeyPairsNegativeTestJSON, cls).setup_clients()
cls.client = cls.keypairs_client
def _create_keypair(self, keypair_name, pub_key=None):
self.client.create_keypair(keypair_name, pub_key)
self.addCleanup(self.client.delete_keypair, keypair_name)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('29cca892-46ae-4d48-bc32-8fe7e731eb81')
def test_keypair_create_with_invalid_pub_key(self):
# Keypair should not be created with a non RSA public key
k_name = data_utils.rand_name('keypair-')
pub_key = "ssh-rsa JUNK nova@ubuntu"
self.assertRaises(lib_exc.BadRequest,
self._create_keypair, k_name, pub_key)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('7cc32e47-4c42-489d-9623-c5e2cb5a2fa5')
def test_keypair_delete_nonexistent_key(self):
# Non-existent key deletion should throw a proper error
k_name = data_utils.rand_name("keypair-non-existent-")
self.assertRaises(lib_exc.NotFound, self.client.delete_keypair,
k_name)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('dade320e-69ca-42a9-ba4a-345300f127e0')
def test_create_keypair_with_empty_public_key(self):
# Keypair should not be created with an empty public key
k_name = data_utils.rand_name("keypair-")
pub_key = ' '
self.assertRaises(lib_exc.BadRequest, self._create_keypair,
k_name, pub_key)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('fc100c19-2926-4b9c-8fdc-d0589ee2f9ff')
def test_create_keypair_when_public_key_bits_exceeds_maximum(self):
# Keypair should not be created when public key bits are too long
k_name = data_utils.rand_name("keypair-")
pub_key = 'ssh-rsa ' + 'A' * 2048 + ' openstack@ubuntu'
self.assertRaises(lib_exc.BadRequest, self._create_keypair,
k_name, pub_key)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('0359a7f1-f002-4682-8073-0c91e4011b7c')
def test_create_keypair_with_duplicate_name(self):
# Keypairs with duplicate names should not be created
k_name = data_utils.rand_name('keypair-')
self.client.create_keypair(k_name)
# Now try the same keyname to create another key
self.assertRaises(lib_exc.Conflict, self._create_keypair,
k_name)
self.client.delete_keypair(k_name)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('1398abe1-4a84-45fb-9294-89f514daff00')
def test_create_keypair_with_empty_name_string(self):
# Keypairs with name being an empty string should not be created
self.assertRaises(lib_exc.BadRequest, self._create_keypair,
'')
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('3faa916f-779f-4103-aca7-dc3538eee1b7')
def test_create_keypair_with_long_keynames(self):
# Keypairs with name longer than 255 chars should not be created
k_name = 'keypair-'.ljust(260, '0')
self.assertRaises(lib_exc.BadRequest, self._create_keypair,
k_name)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('45fbe5e0-acb5-49aa-837a-ff8d0719db91')
def test_create_keypair_invalid_name(self):
# Keypairs with name being an invalid name should not be created
k_name = 'key_/.\@:'
self.assertRaises(lib_exc.BadRequest, self._create_keypair,
k_name)
| apache-2.0 | -3,770,412,985,485,290,500 | 43.284314 | 78 | 0.659951 | false |
ESSS/conda-devenv | tests/test_include.py | 1 | 2737 | import pytest
import yaml
from conda_devenv.devenv import handle_includes, render_jinja
def obtain_yaml_dicts(root_yaml_filename):
contents = open(root_yaml_filename, "r").read()
contents = render_jinja(contents, filename=root_yaml_filename, is_included=False)
root_yaml = yaml.safe_load(contents)
dicts = handle_includes(root_yaml_filename, root_yaml).values()
dicts = list(dicts)
    # The list order does not matter, so we can't use indices to fetch each item
number_of_parsed_yamls = len(dicts)
dicts = {d["name"]: d for d in dicts}
# Make sure we're not removing any parsed yamls
assert len(dicts) == number_of_parsed_yamls
return dicts
def test_include(datadir):
dicts = obtain_yaml_dicts(str(datadir / "c.yml"))
assert len(dicts) == 3
assert dicts["a"] == {
"name": "a",
"dependencies": ["a_dependency",],
"environment": {"PATH": ["a_path"]},
}
assert dicts["b"] == {
"name": "b",
"dependencies": ["b_dependency",],
"environment": {"PATH": ["b_path"]},
"channels": ["b_channel",],
}
assert dicts["c"] == {
"name": "c",
"channels": ["c_channel",],
}
dicts = obtain_yaml_dicts(str(datadir / "empty_includes.yml"))
assert dicts["empty_includes"] == {
"name": "empty_includes",
}
def test_include_non_dag(datadir):
dicts = obtain_yaml_dicts(str(datadir / "b_non_dag.yml"))
assert dicts["a"] == {
"name": "a",
"dependencies": ["a_dependency",],
}
assert dicts["b"] == {
"name": "b",
"dependencies": ["b_dependency",],
}
def test_include_non_existent_file(datadir):
with pytest.raises(ValueError) as e:
obtain_yaml_dicts(str(datadir / "includes_non_existent_file.yml"))
assert "includes_non_existent_file.yml" in str(e.value)
assert "some_non_existent_file.yml" in str(e.value)
def test_include_file_with_relative_includes(datadir):
dicts = obtain_yaml_dicts(str(datadir / "proj1/relative_include.yml"))
assert len(dicts) == 3
assert sorted(dicts.keys()) == ["proj1", "proj2", "set_variable"]
def test_include_relative_to_env_filename(datadir, monkeypatch):
monkeypatch.chdir(datadir / "proj1")
dicts = obtain_yaml_dicts(str(datadir / "relative_includes.yml"))
assert len(dicts) == 4
assert sorted(dicts.keys()) == [
"non_root_relative",
"proj1",
"proj2",
"set_variable",
]
def test_include_empty_file(datadir):
with pytest.raises(ValueError):
obtain_yaml_dicts(str(datadir / "includes_empty_file.yml"))
with pytest.raises(ValueError):
obtain_yaml_dicts(str(datadir / "empty_file.yml"))
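# Illustrative sketch (not part of the test suite): a guess at the minimal
# shape of a root devenv file like the c.yml used above; the include paths and
# keys shown here are assumptions, not copied from the real fixtures.
_EXAMPLE_ROOT_DEVENV = """
name: c
includes:
  - a.yml
  - b.yml
channels:
  - c_channel
"""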
| mit | -339,116,720,365,899,650 | 27.510417 | 85 | 0.61308 | false |
joxer/Baka-No-Voltron | tmp/android.dist/private/renpy/display/gesture.py | 1 | 3800 | # Copyright 2004-2015 Tom Rothamel <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import pygame
import math
import renpy.display
DIRECTIONS = [ "n", "ne", "e", "se", "s", "sw", "w", "nw" ]
def dispatch_gesture(gesture):
"""
This is called with a gesture to dispatch it as an event.
"""
event = renpy.config.gestures.get(gesture, None)
if event is not None:
renpy.exports.queue_event(event)
raise renpy.display.core.IgnoreEvent()
class GestureRecognizer(object):
def __init__(self):
super(GestureRecognizer, self).__init__()
self.x = None
self.y = None
def start(self, x, y):
# The last coordinates we saw motion at.
self.x = x
self.y = y
# Minimum sizes for gestures.
self.min_component = renpy.config.screen_width * renpy.config.gesture_component_size
self.min_stroke = renpy.config.screen_width * renpy.config.gesture_stroke_size
# The direction of the current strokes.
self.current_stroke = None
# The length of the current stroke.
self.stroke_length = 0
# A list of strokes we've recognized.
self.strokes = [ ]
def take_point(self, x, y):
if self.x is None:
return
dx = x - self.x
dy = y - self.y
length = math.hypot(dx, dy)
if length < self.min_component:
return
self.x = x
self.y = y
angle = math.atan2(dx, -dy) * 180 / math.pi + 22.5
if angle < 0:
angle += 360
stroke = DIRECTIONS[int(angle / 45)]
if stroke == self.current_stroke:
self.stroke_length += length
else:
self.current_stroke = stroke
self.stroke_length = length
if self.stroke_length > self.min_stroke:
if (not self.strokes) or (self.strokes[-1] != stroke):
self.strokes.append(stroke)
def finish(self):
rv = None
if self.x is None:
return
if self.strokes:
func = renpy.config.dispatch_gesture
if func is None:
func = dispatch_gesture
rv = func("_".join(self.strokes))
self.x = None
self.y = None
return rv
def cancel(self):
self.x = None
self.y = None
def event(self, ev, x, y):
if ev.type == pygame.MOUSEBUTTONDOWN:
self.start(x, y)
elif ev.type == pygame.MOUSEMOTION:
if ev.buttons[0]:
self.take_point(x, y)
elif ev.type == pygame.MOUSEBUTTONUP:
self.take_point(x, y)
if ev.button == 1:
return self.finish()
recognizer = GestureRecognizer()
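# Illustrative sketch (not part of Ren'Py): dispatch_gesture() looks the
# underscore-joined stroke string up in renpy.config.gestures, so a mapping
# like this example (both key and event name are made-up values) would queue
# the named event for a north-then-south swipe.
_example_gesture_config = {"n_s": "hide_windows"}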
| gpl-2.0 | 7,992,540,546,767,366,000 | 26.737226 | 92 | 0.61 | false |
CGATOxford/proj029 | documentation/source/conf.py | 1 | 10796 | # -*- coding: utf-8 -*-
#
# Combined microbiome and host profiling in a mouse model of colitis suggests host immune activity drives changes in the gut micro-environment that influence both community structure and gene expression documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 20 13:43:00 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Redefine supported_image_types for the HTML builder
from sphinx.builders.html import StandaloneHTMLBuilder
StandaloneHTMLBuilder.supported_image_types = ['image/png', 'image/pdf','image/svg+xml',
'image/gif', 'image/jpeg',
'application/pdf']
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinxcontrib.programoutput',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Proj029 - Microbiome and host profiling in a mouse model of colitis'
#suggests host immune activity drives changes in the gut micro-environment that influence both community structure and gene expression'
copyright = u'2015, Nick Ilott'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {"sidebarbgcolor": "gainsboro",
# "sidebartextcolor": "midnightBlue",
# #"sidebarlinkcolor": "midnightBlue",
# #"textcolor": "black",
# }
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../logo/logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Combinedmicrobiomeandhostprofilinginamousemodelofcolitis'
#suggestshostimmuneactivitydriveschangesinthegutmicro-environmentthatinfluencebothcommunitystructureandgeneexpressiondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'Combinedmicrobiomeandhostprofilinginamousemodelofcolitis.tex',
   u'Combined microbiome and host profiling in a mouse model of colitis Documentation',
   u'Nick Ilott', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'combinedmicrobiomeandhostprofilinginamousemodelofcolitis',
     u'Combined microbiome and host profiling in a mouse model of colitis Documentation',
     [u'Nick Ilott'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
  ('index', 'Combinedmicrobiomeandhostprofilinginamousemodelofcolitis',
   u'Combined microbiome and host profiling in a mouse model of colitis Documentation',
   u'Nick Ilott', 'Combinedmicrobiomeandhostprofilinginamousemodelofcolitis',
   'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| bsd-3-clause | -2,652,090,745,792,702,000 | 36.880702 | 342 | 0.729159 | false |
WanderingStar/rpi | shutdown_button.py | 1 | 2040 | #!/usr/bin/python
# This script is used with an LED and a momentary button, perhaps the same,
# like https://www.sparkfun.com/products/10440
# The LED should be wired to GPIO pin 23 and the button to pin 24.
# The idea is that it is run at startup (for example, from rc.local)
# It turns the LED on to indicate that it's working, and then waits
# for the user to hold down the button. When the script notices that
# the user is holding down the button (which may take up to 5 seconds),
# it starts flashing the LED to confirm. If the user continues to hold
# the button down, the LED goes off and the shutdown sequence is triggered.
# While the system is shutting down (which may take some time), the LED
# does a triple flash. When it's finished shutting down, the LED will
# turn off.
import os
import RPi.GPIO as GPIO
from time import sleep
LED = 23
BUTTON = 24
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(LED, GPIO.OUT, initial=1)
GPIO.setup(BUTTON, GPIO.IN)
def flashLED(secs):
GPIO.output(LED, 0)
sleep(secs)
GPIO.output(LED, 1)
shutdown = False
count = 0
while not shutdown:
# check to see if the button is pressed
if GPIO.input(BUTTON):
# keep track of how many cycles the button has been pressed
count += 1
if count < 5:
# if it hasn't been pressed long enough yet, flash the LED
flashLED(0.25)
else:
# if it has been pressed long enough, trigger shutdown
shutdown = True
# button is not pressed
else:
# reset the counter
count = 0
# check infrequently until we notice that the button is being pressed
if count > 0:
sleep(.25)
else:
sleep(5)
# let the user know that the button press has been noted by turning off the LED
GPIO.output(LED, 0)
os.system("shutdown -h now")
sleep(1)
# triple flash the LED until the program is killed by system shutdown
while True:
flashLED(.1)
sleep(.1)
flashLED(.1)
sleep(.1)
flashLED(.1)
sleep(.5)
| mit | -1,626,048,357,948,252,000 | 28.142857 | 79 | 0.676961 | false |
tanglu-org/merge-o-matic | deb/controlfile.py | 1 | 5047 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# deb/controlfile.py - parse debian control files
#
# Copyright © 2008 Canonical Ltd.
# Author: Scott James Remnant <[email protected]>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
class ControlFile(object):
"""Debian control file.
This can be used directly by calling the parse() function or
overridden to add functionality.
Class Properties:
FieldNames Alternate canonical capitalisation of field names
Properties:
paras List of paragraphs as dictionaries
para Final (or single) paragraph
signed True if the paragraph was PGP signed
"""
FieldNames = []
def __init__(self, filename=None, fileobj=None, *args, **kwds):
self.paras = []
self.para = None
self.signed = False
if fileobj is not None:
self.parse(fileobj, *args, **kwds)
elif filename is not None:
self.open(filename, *args, **kwds)
def capitaliseField(self, field):
"""Capitalise a field name correctly.
Fields are stored in the dictionary canonically capitalised,
words split by dashes and the first letter of each in upper
case.
        This can be overridden by adding the canonical capitalisation
of a field name to the FieldNames list.
"""
for canon in self.FieldNames:
if canon.lower() == field.lower():
return canon
return "-".join([ w.title() for w in field.split("-") ])
def open(self, file, *args, **kwds):
"""Open and parse a control-file format file."""
with open(file) as f:
try:
self.parse(f, *args, **kwds)
except Exception, e:
e.path = file
raise e
def parse(self, file, multi_para=False, signed=False):
"""Parse a control-file format file.
File is any object that acts as an iterator and returns lines,
file-like objects being most common.
Some control files may contain multiple paragraphs separated
by blank lines, if this is the case set multi_para to True.
Some single-paragraph control files may be PGP signed, if this
is the case set signed to True. If the file was actually
signed, the signed member of the object will be set to True.
"""
self.para = {}
is_signed = False
last_field = None
para_border = True
for line in file:
line = line.rstrip()
if line.startswith("#"):
continue
# Multiple blank lines are permitted at paragraph borders
if not len(line) and para_border:
continue
para_border = False
if line[:1].isspace():
if last_field is None:
raise IOError
self.para[last_field] += "\n" + line.lstrip()
elif ":" in line:
(field, value) = line.split(":", 1)
if len(field.rstrip().split(None)) > 1:
raise IOError
last_field = self.capitaliseField(field)
self.para[last_field] = value.lstrip()
elif line.startswith("-----BEGIN PGP") and signed:
if is_signed:
raise IOError
for line in file:
if not len(line) or line.startswith("\n"): break
is_signed = True
elif not len(line):
para_border = True
if multi_para:
self.paras.append(self.para)
self.para = {}
last_field = None
elif is_signed:
try:
pgpsig = file.next()
if not len(pgpsig):
raise IOError
except StopIteration:
raise IOError
if not pgpsig.startswith("-----BEGIN PGP"):
raise IOError
self.signed = True
break
else:
raise IOError
else:
raise IOError
if is_signed and not self.signed:
raise IOError
if last_field:
self.paras.append(self.para)
elif len(self.paras):
self.para = self.paras[-1]
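# Illustrative usage sketch (not part of the original module): parsing a
# single in-memory paragraph; the package fields are invented examples.
def _example_parse_control():
    from StringIO import StringIO
    control = ControlFile(fileobj=StringIO(
        "Package: example\nVersion: 1.0-1\nDepends: libc6\n"))
    return control.para["Package"], control.para["Depends"]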
| gpl-3.0 | -9,124,939,611,046,515,000 | 31.346154 | 71 | 0.551724 | false |
phobson/conda-env | conda_env/exceptions.py | 1 | 2210 | class CondaEnvException(Exception):
pass
class CondaEnvRuntimeError(RuntimeError, CondaEnvException):
pass
class EnvironmentFileNotFound(CondaEnvException):
def __init__(self, filename, *args, **kwargs):
msg = '{} file not found'.format(filename)
self.filename = filename
super(EnvironmentFileNotFound, self).__init__(msg, *args, **kwargs)
class NoBinstar(CondaEnvRuntimeError):
def __init__(self):
msg = 'The anaconda-client cli must be installed to perform this action'
super(NoBinstar, self).__init__(msg)
class AlreadyExist(CondaEnvRuntimeError):
def __init__(self):
msg = 'The environment path already exists'
super(AlreadyExist, self).__init__(msg)
class EnvironmentAlreadyInNotebook(CondaEnvRuntimeError):
def __init__(self, notebook, *args, **kwargs):
msg = "The notebook {} already has an environment"
super(EnvironmentAlreadyInNotebook, self).__init__(msg, *args, **kwargs)
class EnvironmentFileDoesNotExist(CondaEnvRuntimeError):
def __init__(self, handle, *args, **kwargs):
self.handle = handle
msg = "{} does not have an environment definition".format(handle)
super(EnvironmentFileDoesNotExist, self).__init__(msg, *args, **kwargs)
class EnvironmentFileNotDownloaded(CondaEnvRuntimeError):
def __init__(self, username, packagename, *args, **kwargs):
msg = '{}/{} file not downloaded'.format(username, packagename)
self.username = username
self.packagename = packagename
super(EnvironmentFileNotDownloaded, self).__init__(msg, *args, **kwargs)
class SpecNotFound(CondaEnvRuntimeError):
def __init__(self, msg, *args, **kwargs):
super(SpecNotFound, self).__init__(msg, *args, **kwargs)
class InvalidLoader(Exception):
def __init__(self, name):
msg = 'Unable to load installer for {}'.format(name)
super(InvalidLoader, self).__init__(msg)
class IPythonNotInstalled(CondaEnvRuntimeError):
def __init__(self):
msg = """IPython notebook is not installed. Install it with:
            conda install ipython-notebook
"""
super(IPythonNotInstalled, self).__init__(msg)
| bsd-3-clause | -7,228,944,400,857,512,000 | 33 | 80 | 0.669683 | false |
cancerregulome/gidget | commands/feature_matrix_construction/main/parse_tcga_repeat_features.py | 1 | 20264 | '''
Created on Jun 18, 2012
This is a refactoring of Sheila's new_Level3_matrix script into an OO
approach.
This test version duplicates features to test tsvIO.duplicateFeatureLabels().
@author: m.miller
'''
import ConfigParser
from datetime import datetime
import os
import sys
import traceback
import miscIO
import path
from technology_type_factory import technology_type_factory
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def makeOutputFilename ( tumorList, platformID, outSuffix ):
outDir = config.get('main', 'out_directory')
if ( len(tumorList) == 1 ):
zCancer = tumorList[0]
elif len(tumorList) == len(config.get('main', 'cancerDirNames').split(',')):
zCancer = 'all'
else:
tumorList.sort()
zCancer = tumorList[0]
for aCancer in tumorList[1:]:
zCancer = zCancer + '_' + aCancer
print " --> combined multi-cancer name : <%s> " % zCancer
## start by pasting together the outDir, cancer sub-dir, then '/'
## and then the cancer name again, followed by a '.'
# outFilename = outDir + zCancer + "/" + zCancer + "."
# match change in current script to put directly in outDir --mm 2013-05-01
outFilename = outDir + "/" + zCancer + "."
# make sure the directory exists
if not os.path.exists(outDir):
os.makedirs(outDir)
## next we want to replace all '/' in the platform string with '__'
i1 = 0
while ( i1 >= 0 ):
i2 = platformID.find('/', i1)
if 0 > i2:
# --mm for testing on windows
i2 = platformID.find('\\', i1)
if ( i1>0 and i2>0 ):
outFilename += "__"
if ( i2 > 0 ):
outFilename += platformID[i1:i2]
i1 = i2 + 1
else:
i1 = i2
## and finally we add on the suffix (usually something like '25jun')
if ( not outSuffix.startswith(".") ):
outFilename += "."
outFilename += outSuffix
return ( outFilename )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def hackBarcode ( barcode ):
if (barcode.startswith("TCGA-") and barcode[19] == '-'):
barcode = barcode[:19] + 'A' + barcode[19:27]
return barcode
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def sanityCheckSDRF ( sdrfFilename ):
try:
fh = file ( sdrfFilename )
except:
print " ERROR in sanityCheckSDRF ??? failed to open file ??? "
print sdrfFilename
sys.exit(-1)
nl = miscIO.num_lines ( fh )
nr = min ( nl/2, 5 )
nt = [0] * nr
for index in range(nr):
aLine = fh.readline()
aLine = aLine.strip()
tokenList = aLine.split('\t')
nt[index] = len ( tokenList )
ntMin = min ( nt )
ntMax = max ( nt )
for index in range(nl-nr):
aLine = fh.readline()
aLine = aLine.strip()
tokenList = aLine.split('\t')
ntCur = len ( tokenList )
if ( ntCur == 0 ):
continue
if ( ntCur < ntMin ):
mess = "ERROR in sanityCheckSDRF ??? file appears to have been truncated ??? %i %i %i %s" % (ntCur, ntMin, ntMax, str(tokenList))
raise ValueError(mess)
fh.close()
return 1
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getSDRFinfo(sdrfFilename, techType):
print '\n', datetime.now(), 'in getSDRFinfo ... <%s> ' % sdrfFilename
## figure out which platform it is based on the SDRF file ...
print "\tPlatform : ", techType
sanityCheckSDRF(sdrfFilename)
techType.preprocessSDRFFile(sdrfFilename)
filename2sampleID = {}
reader = open(sdrfFilename)
hdrTokens = reader.readline().strip().split('\t')
# set the column indices
try:
techType.setConfiguration(hdrTokens)
except ValueError as ve:
print ve
return 0, {}, []
archives = set()
barcodes = set()
numTokens = len(hdrTokens)
lineNum = 0
while True:
tokens = reader.readline().strip().split('\t')
if ( len(tokens) < numTokens ):
break
lineNum += 1
(barcode, filename, archive, otherInfo, includeFlag, message) = techType.includeFile(tokens)
## also sanity check that we don't have duplicate barcodes ...
if (includeFlag and not barcode in barcodes):
archives.add(archive)
barcodes.add(barcode)
# TODO: check if barcode is a UUID and in hackBarcode look up barcode from UUID
fileBarcodes = filename2sampleID.get(filename, [])
## need to take this out (21feb13) because we need to keep
## the barcodes that the DCC reports in the UUID-to-barcode
## mapping metadata file ...
# fileBarcodes.append((hackBarcode(barcode), otherInfo, archive))
fileBarcodes.append((barcode, otherInfo, archive))
filename2sampleID[filename] = fileBarcodes
print '\tYES including this file ... ', techType.iFilename, tokens[techType.iFilename], techType.iBarcode, tokens[techType.iBarcode], techType.iYes, tokens[techType.iYes]
else:
if not message:
message = '\t(-) NOT including this file ... ', techType.iFilename, tokens[techType.iFilename], techType.iBarcode, tokens[techType.iBarcode], techType.iYes, tokens[techType.iYes]
print '\t', str(message)[1:-1].replace(',', ' '), 'line #: ', lineNum
if 0 == len(filename2sampleID):
raise ValueError('no files were found: tokens[barcode=%i]' % (techType.iBarcode))
keyList = filename2sampleID.keys()
keyList.sort()
print '\tfirst file in filename2sampleID dictionary: ', keyList[0], filename2sampleID[keyList[0]]
## now sort ...
archives = list(archives)
archives.sort()
print '\tfound %d archives and %d data files ' % ( len(archives), len(filename2sampleID) )
print datetime.now(), 'completed getSDRFinfo ... <%s> %d\n' % (sdrfFilename, len(filename2sampleID) )
return (len(barcodes), filename2sampleID, archives)
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getArchiveRevision(dName, mtIndicator):
print '\tin getArchiveRevision ... %s' % dName
mageTabStart = dName.find(mtIndicator)
mageTabEnd = mageTabStart + 9
archDelim = dName.find('.', mageTabEnd)
revDelim = dName.find('.', archDelim+1)
iArch = int(dName[mageTabEnd:archDelim])
iRev = int(dName[archDelim+1:revDelim])
return ( iArch, iRev )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getSDRFilename(topDir):
print '\n', datetime.now(), "in getSDRFilename starting at %s" % topDir
## print topDir
if ( os.path.exists(topDir) ):
topPath = path.path ( topDir )
if ( not topPath.isdir() ):
print ' <%s> is not a directory, skipping ... ' % topPath
return ( 'NA' )
else:
print ' <%s> does not exist, skipping ... ' % topDir
return ( 'NA' )
## first we need to gather up all of the mage-tab directory names, with the proper
## archive and revision numbers ... and at the same time, find the highest
## archive #
mtIndicator = config.get('main', 'mage_tab_dir_indicator')
mageTabDict = {}
maxArch = -1
for dName in topPath.dirs():
if ( dName.find(mtIndicator) >= 0 ):
( iArch, iRev ) = getArchiveRevision(dName, mtIndicator)
mageTabDict[dName] = ( iArch, iRev )
if ( iArch > maxArch ):
maxArch = iArch
if ( maxArch == -1 ):
print " WARNING ... in getSDRFilename ... failed to find mage-tab directory in %s" % topDir
## now we need to get the highest revision number for this archive
maxRev = -1
for curKey in mageTabDict.keys():
if ( mageTabDict[curKey][0] == maxArch ):
if ( mageTabDict[curKey][1] > maxRev ):
maxRev = mageTabDict[curKey][1]
topKey = curKey
if ( maxRev < 0 ):
print "\n--> FAILED to find SDRF file !!! ", topDir
## print "\nFATAL ERROR in getSDRFilename ??? %s %s %s %s" % (mageTabDict.keys(), mageTabDict, maxArch, maxRev)
## return ( "NA" )
raise ValueError("\nFATAL ERROR in getSDRFilename ??? %s %s %s %s" % (mageTabDict.keys(), mageTabDict, maxArch, maxRev))
## and now we have the proper mage-tab directory
print '\thave topKey: ', topKey
mageTabDir = path.path( topKey)
for fName in mageTabDir.files():
print '\tlooking at fName <%s> ' % fName
if ( fName.endswith(config.get('main', 'sdrfExt')) ):
print datetime.now(), 'found', fName, 'in getSDRFilename'
return ( fName )
print "\t--> FAILED to find SDRF file !!! ", topDir
print "FATAL ERROR in getSDRFilename ??? %s %s %s %s" % (mageTabDict.keys(), mageTabDict, maxArch, maxRev)
return ( "NA" )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getFilesFromArchives(topDir, techType):
return techType.getFilesFromArchives(topDir)
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def openDataFreezeLogFile(tumorTypes, outFilename, outSuffix, platformStrings):
print " in openDataFreezeLogFile ... "
if ( len(platformStrings) != 1 ):
print " ERROR in openDataFreezeLogFile ... there should only be one platform string "
print platformStrings
sys.exit(-1)
print "\t<%s> <%s> <%s> <%s>" % (outFilename, tumorTypes, outSuffix, platformStrings[0])
if len(tumorTypes) == len(config.get('main', 'cancerDirNames').split(',')):
tumorTypes = ['all']
tumors = ''
for tumorType in tumorTypes:
tumors += tumorType + '_'
tumors = tumors[:-1]
outFilename = outFilename[:outFilename.rindex('/') + 1]
dflFilename = outFilename + tumors + "." + outSuffix + "."
tokenList = platformStrings[0].split('/')
print len(tokenList), tokenList
for ii in range(len(tokenList)-1,0,-1):
if ( len(tokenList[ii]) > 0 ):
dflFilename += tokenList[ii]
dflFilename += "__"
dflFilename += tokenList[0]
dflFilename += ".data_freeze.log"
print " opening log file at: ", dflFilename
fh = file ( dflFilename, 'w' )
return ( fh )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def verifyPath(outFilename):
index = str(outFilename).rindex('/')
outPath = outFilename[0:index]
if (not os.path.exists(outPath)):
os.makedirs(outPath)
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def writeLog(logFile, filename2sampleInfo, tumorType, platformID, outSuffix, outFilename):
logFile.write('%s Files for 2012_06_18_parse_tcga.py %s %s %s\n' % (datetime.now(), tumorType, outSuffix, platformID))
archive2files = {}
for filename, info in filename2sampleInfo.iteritems():
files = archive2files.get(info[0][2], [])
files += [(info[0][0], filename)]
archive2files[info[0][2]] = files
archives = archive2files.keys()
archives.sort()
for archive in archives:
logFile.write('\t%s\n' % archive)
fileinfo = archive2files[archive]
for barcode, filename in fileinfo:
logFile.write('\t\t%s %s\n' % (barcode, filename))
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def parseFileInfo(techType, tumorType):
try:
topDir = techType.getTopDirectory(tumorType);
except:
traceback.print_exc()
raise ValueError('problem reading top dir for %s:%s' % (techType, tumorType))
print "topDir: %s" % topDir
if techType.hasSDRFFile():
sdrfFilename = getSDRFilename(topDir)
if (sdrfFilename == "NA"):
print 'did not find any samples for %s' % tumorType
return 0, None, None, None
else:
numSamples, filename2sampleInfo, archiveList = getSDRFinfo(sdrfFilename, techType)
localTopDirs = [topDir] ## load up the information from the SDRF file ...
else:
numSamples, filename2sampleInfo, archiveList, localTopDirs = getFilesFromArchives(topDir, techType) ## load up the information from the directory structure ...
return numSamples, filename2sampleInfo, archiveList, localTopDirs
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def mergeFilename2SampleInfo(mergedFilename2sampleInfo, filename2sampleInfo):
# check for filename being the same
for key in filename2sampleInfo.iterkeys():
if key in mergedFilename2sampleInfo:
raise ValueError('(a) this should not happen, right ??? file %s already seen' % key)
# print "WARNING: (b) duplicate file should not happen, right ??? %s " % (key)
# continue
# check for barcodes being the same
newBarcodes = set([info[0][0] for info in filename2sampleInfo.itervalues()])
curBarcodes = set([info[0][0] for info in mergedFilename2sampleInfo.itervalues()])
if len(curBarcodes & newBarcodes):
for barcode in curBarcodes & newBarcodes:
if '-20A-' != barcode[12:17]:
raise ValueError('(b) this should not happen, right ??? barcode(s) %s already seen. %s %s' % ((curBarcodes & newBarcodes), barcode, barcode[12:17]))
mergedFilename2sampleInfo.update(filename2sampleInfo)
def mergeArchiveList(mergedArchiveList, archiveList):
# check for archives being the same
for archive in archiveList:
if (archive == "unknown"):
continue
if archive in mergedArchiveList:
raise ValueError('(c) this should not happen, right ??? archive %s already seen' % archive)
mergedArchiveList += archiveList
return mergedArchiveList
def parseCancers(platformID, tumorTypes, outSuffix):
if 1 == len(tumorTypes) and 'all' == tumorTypes[0].lower():
tumorTypes = config.get("main","cancerDirNames").split(',')
techType = technology_type_factory(config).getTechnologyType(config, platformID)
outFilename = makeOutputFilename(tumorTypes, platformID, outSuffix)
logFile = openDataFreezeLogFile(tumorTypes, outFilename, outSuffix, [platformID])
totalSamples = 0
mergedFilename2sampleInfo = {}
mergedArchiveList = []
topDirs = []
for tumorType in tumorTypes:
try:
numSamples, filename2sampleInfo, archiveList, localTopDirs = parseFileInfo(techType, tumorType)
if 0 == numSamples:
print 'did not find any samples for %s' % tumorType
continue
if techType.isBioAssayDataMatrix():
totalSamples = numSamples
mergedFilename2sampleInfo = filename2sampleInfo
mergedArchiveList = archiveList
else:
mergeFilename2SampleInfo(mergedFilename2sampleInfo, filename2sampleInfo)
mergedArchiveList = mergeArchiveList(mergedArchiveList, archiveList)
## write out what we are using to the log file ...
writeLog(logFile, filename2sampleInfo, tumorType, platformID, outSuffix, outFilename)
topDirs += localTopDirs
totalSamples += numSamples
except Exception as e:
print
traceback.print_exc(10)
# record the error but move on
# print 'ERROR: problem parsing tumor type %s for platform %s' % (tumorType, platformID), e
# raise the exception and stop processing
raise e
if 0 == totalSamples:
## print 'did not find any samples for tumor types %s for platform \'%s\'' % (tumorTypes, platformID)
## return
raise ValueError('ERROR ??? did not find any samples for tumor types %s for platform \'%s\'' % (tumorTypes, platformID))
logFile.flush()
logFile.close()
print "--> setting numSamples ... samples: %i files: %i mappings: %i" % (totalSamples, len(mergedFilename2sampleInfo), len(mergedFilename2sampleInfo))
try:
(dataMatrix, featureList, sampleList) = techType.processDirectories(topDirs, mergedArchiveList, mergedFilename2sampleInfo, totalSamples)
print '\n\tdata matrix(%i): %s\n\tfeature list(%i): %s\n' % \
(len(dataMatrix), dataMatrix[-20:], len(featureList), featureList[-20:])
repeatRows = dataMatrix[:15:3]
for row in repeatRows:
row[0] += 1
dataMatrix += repeatRows
repeatFeatures = featureList[:15:3]
featureList += repeatFeatures
print '\n\trepeat rows(%i): %s\n\tdata matrix(%i): %s\n\trepeat features(%i): %s\n\tfeature list(%i): %s\n' % \
(len(repeatRows), repeatRows, len(dataMatrix), dataMatrix[-20:], len(repeatFeatures), repeatFeatures, len(featureList), featureList[-20:])
matrixParams = techType.postprocess(dataMatrix, featureList, sampleList)
verifyPath(outFilename)
techType.writeMatrix(matrixParams, outFilename)
except Exception as e:
print
traceback.print_exc()
print
raise ValueError('ERROR ??? problem processing tumor types %s for platform \'%s\'' % (tumorTypes, platformID), e)
print
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def verifyArgs(platformID, tumorTypes):
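## sanity-check the requested tumor types and platform against the configuration and the supported platform list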
if ( len(tumorTypes) == 0 ):
raise ValueError(" ERROR ??? have no tumor types in list ??? ", tumorTypes)
if 'all' != tumorTypes[0].lower():
cancerNames = config.get("main","cancerDirNames").split(',')  ## compare against the list of names, not the raw comma-separated string
for tumorType in tumorTypes:
if (tumorType in cancerNames):
print "\tprocessing tumor type: ", tumorType
else:
raise ValueError("ERROR ??? tumorType <%s> not in list of known tumors: %s? " % (tumorType,cancerNames))
platformStrings = technology_type_factory(config).getTechnologyTypes()
if (platformID in platformStrings):
print "\tprocessing platform: ", platformID
else:
print "\tplatform <%s> is not supported " % platformID
print "\tcurrently supported platforms are:\n", platformStrings
raise ValueError("ERROR ??? platform <%s> is not supported " % platformID)
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def setConfig(configFile):
# Read the configuration file
config = ConfigParser.ConfigParser()
config.read(configFile)
return config
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def checkUsage(argv):
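## basic command line validation: argument count and existence of the config file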
if ( len(argv) < 2 or len(argv) > 6):
print " Usage: %s <config file>[ <platform>[ <tumor,[...]*>[ <tag>][ <snapshot>]*]*]*]*" % argv[0]
print " saw %s" % argv
print " ERROR -- bad command line arguments "
sys.exit(-1)
print 'checking %s for validity from %s' % (argv[1], path.path.getcwd())
if not path.path.isfile(path.path(argv[1])):
print '%s is not a file from %s' % (argv[1], path.path.getcwd())
sys.exit(-1)
def initialize(argv):
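## read the config file and resolve platform, tumor types, output suffix (and optional snapshot) from the command line, falling back to config defaults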
checkUsage(argv)
global config
config = setConfig(argv[1])
if 2 < len(argv):
platformID = argv[2]
else:
platformID = config.get('main', 'platformID')
if 3 < len(argv):
tumorTypes = argv[3].split(',')
else:
tumorTypes = config.get('main', 'tumorTypes').split(',')
if 4 < len(argv):
outSuffix = argv[4]
else:
outSuffix = config.get('main', 'outSuffix')
if 5 < len(argv):
config.set('technology_type', 'snapshot', argv[5])
verifyArgs(platformID, tumorTypes)
return platformID, tumorTypes, outSuffix
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
if __name__ == '__main__':
print datetime.now(), "starting..."
platformID, tumorTypes, outSuffix = initialize(sys.argv)
parseCancers(platformID, tumorTypes, outSuffix)
print datetime.now(), "finished"
sys.exit(0)
| mit | 728,553,990,787,473,900 | 39.206349 | 194 | 0.577625 | false |
mganeva/mantid | Framework/PythonInterface/test/python/mantid/geometry/ComponentInfoTest.py | 1 | 20085 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import unittest
import argparse
import numpy as np
from testhelpers import WorkspaceCreationHelper
from mantid.kernel import V3D
from mantid.kernel import Quat
from mantid.geometry import CSGObject
from mantid.simpleapi import *
from itertools import islice
class ComponentInfoTest(unittest.TestCase):
_ws = None
def setUp(self):
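# Build the shared test workspace lazily: 2 spectra with a full instrument, 1 bin each,
# no monitors; spectrum 0 then has its detector IDs cleared.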
if self.__class__._ws is None:
self.__class__._ws = WorkspaceCreationHelper.create2DWorkspaceWithFullInstrument(2, 1, False) # no monitors
self.__class__._ws.getSpectrum(0).clearDetectorIDs()
"""
----------------------------------------------------------------------------
Normal Tests
----------------------------------------------------------------------------
The following test cases test normal usage of the exposed methods.
"""
def test_len(self):
""" Check that there are only 6 components """
info = self._ws.componentInfo()
self.assertEquals(len(info), 6)
def test_size(self):
""" Check that there are only 6 components """
info = self._ws.componentInfo()
self.assertEquals(info.size(), 6)
def test_isDetector(self):
"""Check which components are detectors """
info = self._ws.componentInfo()
self.assertEquals(info.isDetector(0), True)
self.assertEquals(info.isDetector(1), True)
self.assertEquals(info.isDetector(2), False)
self.assertEquals(info.isDetector(3), False)
self.assertEquals(info.isDetector(4), False)
self.assertEquals(info.isDetector(5), False)
def test_detectorsInSubtree(self):
""" Test that a list of detectors is returned """
info = self._ws.componentInfo()
self.assertEquals(type(info.detectorsInSubtree(0)), np.ndarray)
def test_componentsInSubtree(self):
""" Test that a list of components is returned """
info = self._ws.componentInfo()
self.assertEquals(type(info.componentsInSubtree(0)), np.ndarray)
def test_position(self):
""" Test that the component's position is returned. """
info = self._ws.componentInfo()
self.assertEquals(type(info.position(0)), V3D)
def test_rotation(self):
""" Test that the component's rotation is returned. """
info = self._ws.componentInfo()
self.assertEquals(type(info.rotation(0)), Quat)
def test_relativePosition(self):
""" Test that the component's relative position is returned. """
info = self._ws.componentInfo()
self.assertEquals(type(info.relativePosition(0)), V3D)
def test_relativeRotation(self):
""" Test that the component's relative rotation is returned. """
info = self._ws.componentInfo()
self.assertEquals(type(info.relativeRotation(0)), Quat)
def test_setPosition(self):
""" Test that the component's position can be set correctly. """
info = self._ws.componentInfo()
pos = V3D(0,0,0)
info.setPosition(0, pos)
retPos = info.position(0)
self.assertEquals(pos, retPos)
def test_setRotation(self):
""" Test that the component's rotation can be set correctly. """
info = self._ws.componentInfo()
quat = Quat(0,0,0,0)
info.setRotation(0, quat)
retQuat = info.rotation(0)
self.assertEquals(quat, retQuat)
def test_hasSource(self):
""" Check if there is a source """
info = self._ws.componentInfo()
self.assertEquals(info.hasSource(), True)
def test_hasSample(self):
""" Check if there is a sample """
info = self._ws.componentInfo()
self.assertEquals(info.hasSample(), True)
def test_source(self):
""" Check if a source component is returned """
info = self._ws.componentInfo()
self.assertEquals(type(info.source()) , int)
def test_sample(self):
""" Check if a sample component is returned """
info = self._ws.componentInfo()
self.assertEquals(type(info.sample()) , int)
def test_sourcePosition(self):
""" Check that the source postition is a V3D object """
info = self._ws.componentInfo()
self.assertEquals(type(info.sourcePosition()), V3D)
def test_samplePosition(self):
""" Check that the sample postition is a V3D object """
info = self._ws.componentInfo()
self.assertEquals(type(info.samplePosition()), V3D)
def test_hasParent(self):
""" Check if a component has a parent component """
info = self._ws.componentInfo()
self.assertTrue(info.hasParent(0))
def test_parent(self):
""" Check that for a component that has a parent, the parent
component is retrieved. """
info = self._ws.componentInfo()
self.assertEquals(type(info.parent(0)), int)
def test_children(self):
""" Check that for a component that has children, the children
components can be retrieved. """
info = self._ws.componentInfo()
self.assertEquals(type(info.children(0)), np.ndarray)
def test_name(self):
""" Get the name of a component as a string """
info = self._ws.componentInfo()
self.assertEquals(type(info.name(0)), str)
def test_l1(self):
""" Get the l1 value """
info = self._ws.componentInfo()
self.assertEquals(type(info.l1()), float)
def test_scaleFactor(self):
""" Get the scale factor """
info = self._ws.componentInfo()
self.assertEquals(type(info.scaleFactor(0)), V3D)
def test_setScaleFactor(self):
""" Set the scale factor """
info = self._ws.componentInfo()
sf = V3D(0,0,0)
info.setScaleFactor(0, sf)
self.assertEquals(info.scaleFactor(0), sf)
def test_hasValidShape(self):
""" Check for a valid shape """
info = self._ws.componentInfo()
self.assertEquals(info.hasValidShape(0), True)
def test_shape(self):
""" Check a shape is returned"""
info = self._ws.componentInfo()
self.assertEquals(type(info.shape(0)), CSGObject)
def test_createWorkspaceAndComponentInfo(self):
""" Try to create a workspace and see if ComponentInfo object is accessable """
dataX = [1,2,3,4,5]
dataY = [1,2,3,4,5]
workspace = CreateWorkspace(DataX=dataX, DataY=dataY)
info = workspace.componentInfo()
self.assertEquals(info.size(), 1)
def test_indexOfAny(self):
info = self._ws.componentInfo()
index = info.indexOfAny(info.name(info.root()))
# Root index and the discovered index should be the same
self.assertEquals(index, info.root())
def test_indexOfAny_throws(self):
info = self._ws.componentInfo()
with self.assertRaises(ValueError):
info.indexOfAny('fictitious')
"""
----------------------------------------------------------------------------
Extreme Tests
----------------------------------------------------------------------------
The following test cases test around boundary cases for the exposed methods.
"""
def test_isDetector_extreme(self):
info = self._ws.componentInfo()
with self.assertRaises(OverflowError):
info.isDetector(-1)
def test_detectorsInSubtree_extreme(self):
info = self._ws.componentInfo()
with self.assertRaises(OverflowError):
info.detectorsInSubtree(-1)
self.assertEquals(type(info.detectorsInSubtree(5)), np.ndarray)
def test_componentsInSubtree_extreme(self):
info = self._ws.componentInfo()
with self.assertRaises(OverflowError):
info.componentsInSubtree(-1)
self.assertEquals(type(info.componentsInSubtree(5)), np.ndarray)
def test_position_extreme(self):
info = self._ws.componentInfo()
with self.assertRaises(OverflowError):
info.position(-1)
self.assertEquals(type(info.position(0)), V3D)
self.assertEquals(type(info.position(5)), V3D)
def test_rotation_extreme(self):
info = self._ws.componentInfo()
with self.assertRaises(OverflowError):
info.rotation(-1)
self.assertEquals(type(info.rotation(0)), Quat)
self.assertEquals(type(info.rotation(5)), Quat)
def test_relativePosition_extreme(self):
info = self._ws.componentInfo()
with self.assertRaises(OverflowError):
info.relativePosition(-1)
self.assertEquals(type(info.relativePosition(0)), V3D)
self.assertEquals(type(info.relativePosition(5)), V3D)
def test_relativeRotation_extreme(self):
info = self._ws.componentInfo()
with self.assertRaises(OverflowError):
info.relativeRotation(-1)
self.assertEquals(type(info.relativeRotation(0)), Quat)
self.assertEquals(type(info.relativeRotation(5)), Quat)
def test_setPosition_extreme(self):
info = self._ws.componentInfo()
pos = V3D(0,0,0)
with self.assertRaises(OverflowError):
info.setPosition(-1, pos)
def test_setRotation_extreme(self):
info = self._ws.componentInfo()
quat = Quat(0,0,0,0)
with self.assertRaises(OverflowError):
info.setRotation(-1, quat)
def test_hasParent_extreme(self):
info = self._ws.componentInfo()
with self.assertRaises(OverflowError):
info.hasParent(-1)
self.assertTrue(info.hasParent(0))
self.assertFalse(info.hasParent(5))
def test_parent_extreme(self):
info = self._ws.componentInfo()
with self.assertRaises(OverflowError):
info.parent(-1)
self.assertEquals(type(info.parent(0)), int)
self.assertEquals(type(info.parent(5)), int)
def test_children_extreme(self):
info = self._ws.componentInfo()
with self.assertRaises(OverflowError):
info.children(-1)
self.assertEquals(type(info.children(0)), np.ndarray)
self.assertEquals(type(info.children(5)), np.ndarray)
def test_name_extreme(self):
info = self._ws.componentInfo()
with self.assertRaises(OverflowError):
info.name(-1)
self.assertEquals(type(info.name(0)), str)
self.assertEquals(type(info.name(5)), str)
def test_scaleFactor_extreme(self):
info = self._ws.componentInfo()
with self.assertRaises(OverflowError):
info.scaleFactor(-1)
self.assertEquals(type(info.scaleFactor(0)), V3D)
self.assertEquals(type(info.scaleFactor(5)), V3D)
def test_setScaleFactor_extreme(self):
info = self._ws.componentInfo()
sf = V3D(0,0,0)
with self.assertRaises(OverflowError):
info.setScaleFactor(-1, sf)
def test_hasValidShape_extreme(self):
info = self._ws.componentInfo()
with self.assertRaises(OverflowError):
info.hasValidShape(-1)
self.assertEquals(type(info.hasValidShape(0)), bool)
self.assertEquals(type(info.hasValidShape(5)), bool)
def test_shape_extreme(self):
info = self._ws.componentInfo()
with self.assertRaises(OverflowError):
info.shape(-1)
self.assertEquals(type(info.shape(0)), CSGObject)
self.assertEquals(type(info.shape(5)), CSGObject)
"""
----------------------------------------------------------------------------
Exceptional Tests
----------------------------------------------------------------------------
Each of the tests below tries to pass invalid parameters to the exposed
methods and expect an error to be thrown.
"""
def test_size_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.size(0)
def test_isDetector_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.isDetector("Error")
with self.assertRaises(TypeError):
info.isDetector(10.0)
def test_detectorsInSubtree_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.detectorsInSubtree("Error")
with self.assertRaises(TypeError):
info.detectorsInSubtree(10.0)
def test_componentsInSubtree_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.componentsInSubtree("Error")
with self.assertRaises(TypeError):
info.componentsInSubtree(10.0)
def test_position_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.position("Zero")
with self.assertRaises(TypeError):
info.position(0.0)
def test_rotation_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.rotation("Zero")
with self.assertRaises(TypeError):
info.rotation(0.0)
def test_relativePosition_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.relativePosition("Zero")
with self.assertRaises(TypeError):
info.relativePosition(0.0)
def test_relativeRotation_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.relativeRotation("Zero")
with self.assertRaises(TypeError):
info.relativeRotation(0.0)
def test_setPosition_exceptional(self):
info = self._ws.componentInfo()
pos = [0,0,0]
with self.assertRaises(TypeError):
info.setPosition(0, pos)
def test_setRotation_exceptional(self):
info = self._ws.componentInfo()
rot = [0,0,0,0]
with self.assertRaises(TypeError):
info.setRotation(0, rot)
def test_hasSource_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.hasSource(0)
def test_hasSample_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.hasSample(0)
def test_source_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.source(0)
def test_sample_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.sample(0)
def test_sourcePosition_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.sourcePosition(0)
def test_samplePosition_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.samplePosition(0)
def test_hasParent_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.hasParent("Zero")
with self.assertRaises(TypeError):
info.hasParent(0.0)
def test_parent_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.parent("Zero")
with self.assertRaises(TypeError):
info.parent(0.0)
def test_children_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.children("Zero")
with self.assertRaises(TypeError):
info.children(0.0)
def test_name_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.name("Name")
with self.assertRaises(TypeError):
info.name(0.12)
def test_l1_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.l1(0)
def test_scaleFactor_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.scaleFactor("Scale factor")
with self.assertRaises(TypeError):
info.scaleFactor(0.12)
def test_setScaleFactor_exceptional(self):
info = self._ws.componentInfo()
sf = V3D(0,0,0)
with self.assertRaises(TypeError):
info.setScaleFactor("1", sf)
with self.assertRaises(TypeError):
info.setScaleFactor(1.0, sf)
def test_hasValidShape_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.hasValidShape("ValidShape")
with self.assertRaises(TypeError):
info.hasValidShape(1010.00)
def test_shape_exceptional(self):
info = self._ws.componentInfo()
with self.assertRaises(TypeError):
info.shape("Shape")
with self.assertRaises(TypeError):
info.shape(11.32)
def test_basic_iterator(self):
info = self._ws.componentInfo()
expected_iterations = len(info)
actual_iterations = len(list(iter(info)))
self.assertEquals(expected_iterations, actual_iterations)
it = iter(info)
self.assertEquals(next(it).index, 0)
self.assertEquals(next(it).index, 1)
def test_isDetector_via_iterator(self):
comp_info = self._ws.componentInfo()
n_detectors = len(self._ws.detectorInfo())
it = iter(comp_info)
self.assertEquals(next(it).isDetector, True)
self.assertEquals(next(it).isDetector, True)
self.assertEquals(next(it).isDetector, False)
self.assertEquals(next(it).isDetector, False)
def test_position_via_iterator(self):
comp_info = self._ws.componentInfo()
source_pos = comp_info.sourcePosition()
it = iter(comp_info)
# basic check on first detector position
self.assertTrue(next(it).position.distance(source_pos) > 0)
def test_children_via_iterator(self):
info = self._ws.componentInfo()
it = iter(info)
first_det = next(it)
self.assertEquals(type(first_det.children), np.ndarray)
self.assertEquals(len(first_det.children), 0)
root = next(it)
for root in it:
continue
self.assertEquals(root.index, info.root()) # sanity check
self.assertTrue(np.array_equal(root.children, np.array([0,1,2,3,4], dtype='uint64')))
def test_detectorsInSubtree_via_iterator(self):
info = self._ws.componentInfo()
it = iter(info)
first_det = next(it)
self.assertEquals(type(first_det.detectorsInSubtree), np.ndarray)
# For detectors, only contain own index
self.assertTrue(np.array_equal(first_det.detectorsInSubtree,np.array([0], dtype='uint64')))
root = next(it)
for root in it:
continue
self.assertTrue(np.array_equal(root.detectorsInSubtree, np.array([0,1], dtype='uint64')))
def test_componentsInSubtree_via_iterator(self):
info = self._ws.componentInfo()
it = iter(info)
first_det = next(it)
self.assertEquals(type(first_det.detectorsInSubtree), np.ndarray)
# For detectors, only contain own index
self.assertTrue(np.array_equal(first_det.componentsInSubtree,np.array([0], dtype='uint64')))
root = next(it)
for root in it:
continue
# All component indices expected including self
self.assertTrue(np.array_equal(root.componentsInSubtree, np.array([0,1,2,3,4,5], dtype='uint64')))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 8,748,822,791,729,085,000 | 35.584699 | 119 | 0.614439 | false |
abhikeshav/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_drivers_media_eth_oper.py | 1 | 182469 | """ Cisco_IOS_XR_drivers_media_eth_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR drivers\-media\-eth package operational data.
This module contains definitions
for the following management objects\:
ethernet\-interface\: Ethernet operational data
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
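# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the generated bindings): the enum classes
# defined below are plain Python enums, so operational values decoded by
# ydk-py can be compared against them directly.  The variable "alarm_state"
# is hypothetical here; only the comparison pattern is being shown.
#
#   from ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper import \
#       EthCtrlrAlarmStateEnum
#
#   if alarm_state == EthCtrlrAlarmStateEnum.ALARM_SET:
#       print('alarm is asserted')
#   elif alarm_state == EthCtrlrAlarmStateEnum.ALARM_NOT_SET:
#       print('alarm is clear')
# ----------------------------------------------------------------------------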
class EthCtrlrAlarmStateEnum(Enum):
"""
EthCtrlrAlarmStateEnum
Ethernet alarm state
.. data:: ALARM_NOT_SUPPORTED = 0
Not supported on this interface
.. data:: ALARM_SET = 1
Alarm set
.. data:: ALARM_NOT_SET = 2
Alarm not set
"""
ALARM_NOT_SUPPORTED = 0
ALARM_SET = 1
ALARM_NOT_SET = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthCtrlrAlarmStateEnum']
class EtherAinsStatusEnum(Enum):
"""
EtherAinsStatusEnum
Ether ains status
.. data:: AINS_SOAK_STATUS_NONE = 0
AINS Soak timer not running
.. data:: AINS_SOAK_STATUS_PENDING = 1
AINS Soak timer pending
.. data:: AINS_SOAK_STATUS_RUNNING = 2
AINS Soak timer running
"""
AINS_SOAK_STATUS_NONE = 0
AINS_SOAK_STATUS_PENDING = 1
AINS_SOAK_STATUS_RUNNING = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EtherAinsStatusEnum']
class EtherDomAlarmEnum(Enum):
"""
EtherDomAlarmEnum
Ether dom alarm
.. data:: NO_INFORMATION = 0
DOM Alarm information is not available
.. data:: ALARM_HIGH = 1
Alarm high
.. data:: WARNING_HIGH = 2
Warning high
.. data:: NORMAL = 3
Within normal parameters
.. data:: WARNING_LOW = 4
Warning low
.. data:: ALARM_LOW = 5
Alarm low
"""
NO_INFORMATION = 0
ALARM_HIGH = 1
WARNING_HIGH = 2
NORMAL = 3
WARNING_LOW = 4
ALARM_LOW = 5
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EtherDomAlarmEnum']
class EtherFlowcontrolEnum(Enum):
"""
EtherFlowcontrolEnum
Flowcontrol type
.. data:: NO_FLOWCONTROL = 0
No flow control (disabled)
.. data:: EGRESS = 1
Traffic egress (pause frames ingress)
.. data:: INGRESS = 2
Traffic ingress (pause frames egress)
.. data:: BIDIRECTIONAL = 3
On both ingress and egress
"""
NO_FLOWCONTROL = 0
EGRESS = 1
INGRESS = 2
BIDIRECTIONAL = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EtherFlowcontrolEnum']
class EtherLedStateEnum(Enum):
"""
EtherLedStateEnum
Ether led state
.. data:: LED_STATE_UNKNOWN = 0
LED state is unknown
.. data:: LED_OFF = 1
LED is off
.. data:: GREEN_ON = 2
LED is green
.. data:: GREEN_FLASHING = 3
LED is flashing green
.. data:: YELLOW_ON = 4
LED is yellow
.. data:: YELLOW_FLASHING = 5
LED is flashing yellow
.. data:: RED_ON = 6
LED is red
.. data:: RED_FLASHING = 7
LED is flashing red
"""
LED_STATE_UNKNOWN = 0
LED_OFF = 1
GREEN_ON = 2
GREEN_FLASHING = 3
YELLOW_ON = 4
YELLOW_FLASHING = 5
RED_ON = 6
RED_FLASHING = 7
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EtherLedStateEnum']
class EtherLinkStateEnum(Enum):
"""
EtherLinkStateEnum
Ethernet link state\: IEEE 802.3/802.3ae clause
30.5.1.1.4
.. data:: STATE_UNDEFINED = 0
State undefined
.. data:: UNKNOWN_STATE = 1
Initializing, true state not yet known
.. data:: AVAILABLE = 2
Link or light normal, loopback normal
.. data:: NOT_AVAILABLE = 3
Link loss or low light, no loopback
.. data:: REMOTE_FAULT = 4
Remote fault with no detail
.. data:: INVALID_SIGNAL = 5
Invalid signal, applies only to 10BASE-FB
.. data:: REMOTE_JABBER = 6
Remote fault, reason known to be jabber
.. data:: LINK_LOSS = 7
Remote fault, reason known to be far-end link
loss
.. data:: REMOTE_TEST = 8
Remote fault, reason known to be test
.. data:: OFFLINE = 9
Offline (applies to auto-negotiation)
.. data:: AUTO_NEG_ERROR = 10
Auto-Negotiation Error
.. data:: PMD_LINK_FAULT = 11
PMD/PMA receive link fault
.. data:: FRAME_LOSS = 12
WIS loss of frames
.. data:: SIGNAL_LOSS = 13
WIS loss of signal
.. data:: LINK_FAULT = 14
PCS receive link fault
.. data:: EXCESSIVE_BER = 15
PCS Bit Error Rate monitor reporting excessive
error rate
.. data:: DXS_LINK_FAULT = 16
DTE XGXS receive link fault
.. data:: PXS_LINK_FAULT = 17
PHY XGXS transmit link fault
.. data:: SECURITY = 18
Security failure (not a valid part)
.. data:: PHY_NOT_PRESENT = 19
The optics for the port are not present
.. data:: NO_OPTIC_LICENSE = 20
License error (No advanced optical license)
.. data:: UNSUPPORTED_MODULE = 21
Module is not supported
.. data:: DWDM_LASER_SHUT = 22
DWDM Laser shutdown
.. data:: WANPHY_LASER_SHUT = 23
WANPHY Laser shutdown
.. data:: INCOMPATIBLE_CONFIG = 24
Incompatible configuration
.. data:: SYSTEM_ERROR = 25
System error
.. data:: WAN_FRAMING_ERROR = 26
WAN Framing Error
.. data:: OTN_FRAMING_ERROR = 27
OTN Framing Error
"""
STATE_UNDEFINED = 0
UNKNOWN_STATE = 1
AVAILABLE = 2
NOT_AVAILABLE = 3
REMOTE_FAULT = 4
INVALID_SIGNAL = 5
REMOTE_JABBER = 6
LINK_LOSS = 7
REMOTE_TEST = 8
OFFLINE = 9
AUTO_NEG_ERROR = 10
PMD_LINK_FAULT = 11
FRAME_LOSS = 12
SIGNAL_LOSS = 13
LINK_FAULT = 14
EXCESSIVE_BER = 15
DXS_LINK_FAULT = 16
PXS_LINK_FAULT = 17
SECURITY = 18
PHY_NOT_PRESENT = 19
NO_OPTIC_LICENSE = 20
UNSUPPORTED_MODULE = 21
DWDM_LASER_SHUT = 22
WANPHY_LASER_SHUT = 23
INCOMPATIBLE_CONFIG = 24
SYSTEM_ERROR = 25
WAN_FRAMING_ERROR = 26
OTN_FRAMING_ERROR = 27
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EtherLinkStateEnum']
class EtherPhyPresentEnum(Enum):
"""
EtherPhyPresentEnum
Ether phy present
.. data:: PHY_NOT_PRESENT = 0
No PHY present
.. data:: PHY_PRESENT = 1
PHY is present
.. data:: NO_INFORMATION = 2
State is unknown
"""
PHY_NOT_PRESENT = 0
PHY_PRESENT = 1
NO_INFORMATION = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EtherPhyPresentEnum']
class EthernetBertErrCntEnum(Enum):
"""
EthernetBertErrCntEnum
Ethernet bert err cnt
.. data:: NO_COUNT_TYPE = 0
no count type
.. data:: BIT_ERROR_COUNT = 1
bit error count
.. data:: FRAME_ERROR_COUNT = 2
frame error count
.. data:: BLOCK_ERROR_COUNT = 3
block error count
.. data:: ETHERNET_BERT_ERR_CNT_TYPES = 4
ethernet bert err cnt types
"""
NO_COUNT_TYPE = 0
BIT_ERROR_COUNT = 1
FRAME_ERROR_COUNT = 2
BLOCK_ERROR_COUNT = 3
ETHERNET_BERT_ERR_CNT_TYPES = 4
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetBertErrCntEnum']
class EthernetBertPatternEnum(Enum):
"""
EthernetBertPatternEnum
Ethernet test patterns (IEEE spec 36A/48A)
.. data:: NO_TEST_PATTERN = 0
no test pattern
.. data:: HIGH_FREQUENCY = 1
high frequency
.. data:: LOW_FREQUENCY = 2
low frequency
.. data:: MIXED_FREQUENCY = 3
mixed frequency
.. data:: CONTINUOUS_RANDOM = 4
continuous random
.. data:: CONTINUOUS_JITTER = 5
continuous jitter
.. data:: LONG_CONTINUOUS_RANDOM = 6
long continuous random
.. data:: SHORT_CONTINUOUS_RANDOM = 7
short continuous random
.. data:: PSEUDORANDOM_SEED_A = 8
pseudorandom seed a
.. data:: PSEUDORANDOM_SEED_B = 9
pseudorandom seed b
.. data:: PRBS31 = 10
prbs31
.. data:: SQUARE_WAVE = 11
square wave
.. data:: PSEUDORANDOM = 12
pseudorandom
.. data:: ETHERNET_BERT_PATTERN_TYPES = 13
ethernet bert pattern types
"""
NO_TEST_PATTERN = 0
HIGH_FREQUENCY = 1
LOW_FREQUENCY = 2
MIXED_FREQUENCY = 3
CONTINUOUS_RANDOM = 4
CONTINUOUS_JITTER = 5
LONG_CONTINUOUS_RANDOM = 6
SHORT_CONTINUOUS_RANDOM = 7
PSEUDORANDOM_SEED_A = 8
PSEUDORANDOM_SEED_B = 9
PRBS31 = 10
SQUARE_WAVE = 11
PSEUDORANDOM = 12
ETHERNET_BERT_PATTERN_TYPES = 13
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetBertPatternEnum']
class EthernetDevEnum(Enum):
"""
EthernetDevEnum
Ethernet dev
.. data:: NO_DEVICE = 0
no device
.. data:: PMA_PMD = 1
pma pmd
.. data:: WIS = 2
wis
.. data:: PCS = 3
pcs
.. data:: PHY_XS = 4
phy xs
.. data:: DTE_XS = 5
dte xs
.. data:: ETHERNET_NUM_DEV = 6
ethernet num dev
"""
NO_DEVICE = 0
PMA_PMD = 1
WIS = 2
PCS = 3
PHY_XS = 4
DTE_XS = 5
ETHERNET_NUM_DEV = 6
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetDevEnum']
class EthernetDevIfEnum(Enum):
"""
EthernetDevIfEnum
Ethernet dev if
.. data:: NO_INTERFACE = 0
no interface
.. data:: XGMII = 1
xgmii
.. data:: XAUI = 2
xaui
.. data:: ETHERNET_NUM_DEV_IF = 3
ethernet num dev if
"""
NO_INTERFACE = 0
XGMII = 1
XAUI = 2
ETHERNET_NUM_DEV_IF = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetDevIfEnum']
class EthernetDuplexEnum(Enum):
"""
EthernetDuplexEnum
Duplexity
.. data:: ETHERNET_DUPLEX_INVALID = 0
ethernet duplex invalid
.. data:: HALF_DUPLEX = 1
half duplex
.. data:: FULL_DUPLEX = 2
full duplex
"""
ETHERNET_DUPLEX_INVALID = 0
HALF_DUPLEX = 1
FULL_DUPLEX = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetDuplexEnum']
class EthernetFecEnum(Enum):
"""
EthernetFecEnum
FEC type
.. data:: NOT_CONFIGURED = 0
FEC not configured
.. data:: STANDARD = 1
Reed-Solomon encoding
.. data:: DISABLED = 2
FEC explicitly disabled
"""
NOT_CONFIGURED = 0
STANDARD = 1
DISABLED = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetFecEnum']
class EthernetIpgEnum(Enum):
"""
EthernetIpgEnum
Inter packet gap
.. data:: STANDARD = 0
IEEE standard value of 12
.. data:: NON_STANDARD = 1
Non-standard value of 16
"""
STANDARD = 0
NON_STANDARD = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetIpgEnum']
class EthernetLoopbackEnum(Enum):
"""
EthernetLoopbackEnum
Loopback type
.. data:: NO_LOOPBACK = 0
Disabled
.. data:: INTERNAL = 1
Loopback in the framer
.. data:: LINE = 2
Loops peer's packets back to them
.. data:: EXTERNAL = 3
tx externally connected to rx
"""
NO_LOOPBACK = 0
INTERNAL = 1
LINE = 2
EXTERNAL = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetLoopbackEnum']
class EthernetMediaEnum(Enum):
"""
EthernetMediaEnum
Ethernet media types\: IEEE 802.3/802.3ae clause
30.5.1.1.2
.. data:: ETHERNET_OTHER = 0
IEEE 802.3/802.3ae clause 30.2.5
.. data:: ETHERNET_UNKNOWN = 1
Initializing, true state or type not yet known
.. data:: ETHERNET_AUI = 2
No internal MAU, view from AUI
.. data:: ETHERNET_10BASE5 = 3
Thick coax MAU
.. data:: ETHERNET_FOIRL = 4
FOIRL MAU as specified in 9.9
.. data:: ETHERNET_10BASE2 = 5
Thin coax MAU
.. data:: ETHERNET_10BROAD36 = 6
Broadband DTE MAU
.. data:: ETHERNET_10BASE = 7
UTP MAU, duplexity unknown
.. data:: ETHERNET_10BASE_THD = 8
UTP MAU, half duplex
.. data:: ETHERNET_10BASE_TFD = 9
UTP MAU, full duplex
.. data:: ETHERNET_10BASE_FP = 10
Passive fiber MAU
.. data:: ETHERNET_10BASE_FB = 11
Synchronous fiber MAU
.. data:: ETHERNET_10BASE_FL = 12
Asynchronous fiber MAU, duplexity unknown
.. data:: ETHERNET_10BASE_FLHD = 13
Asynchronous fiber MAU, half duplex
.. data:: ETHERNET_10BASE_FLFD = 14
Asynchronous fiber MAU, full duplex
.. data:: ETHERNET_100BASE_T4 = 15
Four-pair Category 3 UTP
.. data:: ETHERNET_100BASE_TX = 16
Two-pair Category 5 UTP, duplexity unknown
.. data:: ETHERNET_100BASE_TXHD = 17
Two-pair Category 5 UTP, half duplex
.. data:: ETHERNET_100BASE_TXFD = 18
Two-pair Category 5 UTP, full duplex
.. data:: ETHERNET_100BASE_FX = 19
X fiber over PMD, duplexity unknown
.. data:: ETHERNET_100BASE_FXHD = 20
X fiber over PMD, half duplex
.. data:: ETHERNET_100BASE_FXFD = 21
X fiber over PMD, full duplex
.. data:: ETHERNET_100BASE_EX = 22
X fiber over PMD (40km), duplexity unknown
.. data:: ETHERNET_100BASE_EXHD = 23
X fiber over PMD (40km), half duplex
.. data:: ETHERNET_100BASE_EXFD = 24
X fiber over PMD (40km), full duplex
.. data:: ETHERNET_100BASE_T2 = 25
Two-pair Category 3 UTP, duplexity unknown
.. data:: ETHERNET_100BASE_T2HD = 26
Two-pair Category 3 UTP, half duplex
.. data:: ETHERNET_100BASE_T2FD = 27
Two-pair Category 3 UTP, full duplex
.. data:: ETHERNET_1000BASE_X = 28
X PCS/PMA, duplexity unknown
.. data:: ETHERNET_1000BASE_XHD = 29
X 1000BASE-XHDX PCS/PMA, half duplex
.. data:: ETHERNET_1000BASE_XFD = 30
X PCS/PMA, full duplex
.. data:: ETHERNET_1000BASE_LX = 31
X fiber over long-wl laser PMD, duplexity
unknown
.. data:: ETHERNET_1000BASE_LXHD = 32
X fiber over long-wl laser PMD, half duplex
.. data:: ETHERNET_1000BASE_LXFD = 33
X fiber over long-wl laser PMD, full duplex
.. data:: ETHERNET_1000BASE_SX = 34
X fiber over short-wl laser PMD, duplexity
unknown
.. data:: ETHERNET_1000BASE_SXHD = 35
X fiber over short-wl laser PMD, half duplex
.. data:: ETHERNET_1000BASE_SXFD = 36
X fiber over short-wl laser PMD, full duplex
.. data:: ETHERNET_1000BASE_CX = 37
X copper over 150-Ohm balanced PMD, duplexity
unknown
.. data:: ETHERNET_1000BASE_CXHD = 38
X copper over 150-Ohm balanced PMD, half duplex
.. data:: ETHERNET_1000BASE_CXFD = 39
X copper over 150-Ohm balanced PMD, full duplex
.. data:: ETHERNET_1000BASE = 40
Four-pair Category 5 UTP PHY, duplexity unknown
.. data:: ETHERNET_1000BASE_THD = 41
Four-pair Category 5 UTP PHY, half duplex
.. data:: ETHERNET_1000BASE_TFD = 42
Four-pair Category 5 UTP PHY, full duplex
.. data:: ETHERNET_10GBASE_X = 43
X PCS/PMA
.. data:: ETHERNET_10GBASE_LX4 = 44
X fiber over 4 lane 1310nm optics
.. data:: ETHERNET_10GBASE_R = 45
R PCS/PMA
.. data:: ETHERNET_10GBASE_ER = 46
R fiber over 1550nm optics
.. data:: ETHERNET_10GBASE_LR = 47
R fiber over 1310nm optics
.. data:: ETHERNET_10GBASE_SR = 48
R fiber over 850nm optics
.. data:: ETHERNET_10GBASE_W = 49
W PCS/PMA
.. data:: ETHERNET_10GBASE_EW = 50
W fiber over 1550nm optics
.. data:: ETHERNET_10GBASE_LW = 51
W fiber over 1310nm optics
.. data:: ETHERNET_10GBASE_SW = 52
W fiber over 850nm optics
.. data:: ETHERNET_1000BASE_ZX = 53
Single-mode fiber over 1550nm optics (Cisco)
.. data:: ETHERNET_1000BASE_CWDM = 54
CWDM with unknown wavelength optics
.. data:: ETHERNET_1000BASE_CWDM_1470 = 55
CWDM with 1470nm optics
.. data:: ETHERNET_1000BASE_CWDM_1490 = 56
CWDM with 1490nm optics
.. data:: ETHERNET_1000BASE_CWDM_1510 = 57
CWDM with 1510nm optics
.. data:: ETHERNET_1000BASE_CWDM_1530 = 58
CWDM with 1530nm optics
.. data:: ETHERNET_1000BASE_CWDM_1550 = 59
CWDM with 1550nm optics
.. data:: ETHERNET_1000BASE_CWDM_1570 = 60
CWDM with 1570nm optics
.. data:: ETHERNET_1000BASE_CWDM_1590 = 61
CWDM with 1590nm optics
.. data:: ETHERNET_1000BASE_CWDM_1610 = 62
CWDM with 1610nm optics
.. data:: ETHERNET_10GBASE_ZR = 63
Cisco-defined, over 1550nm optics
.. data:: ETHERNET_10GBASE_DWDM = 64
DWDM optics
.. data:: ETHERNET_100GBASE_LR4 = 65
fiber over 4 lane optics (long reach)
.. data:: ETHERNET_1000BASE_DWDM = 66
DWDM optics
.. data:: ETHERNET_1000BASE_DWDM_1533 = 67
DWDM with 1533nm optics
.. data:: ETHERNET_1000BASE_DWDM_1537 = 68
DWDM with 1537nm optics
.. data:: ETHERNET_1000BASE_DWDM_1541 = 69
DWDM with 1541nm optics
.. data:: ETHERNET_1000BASE_DWDM_1545 = 70
DWDM with 1545nm optics
.. data:: ETHERNET_1000BASE_DWDM_1549 = 71
DWDM with 1549nm optics
.. data:: ETHERNET_1000BASE_DWDM_1553 = 72
DWDM with 1553nm optics
.. data:: ETHERNET_1000BASE_DWDM_1557 = 73
DWDM with 1557nm optics
.. data:: ETHERNET_1000BASE_DWDM_1561 = 74
DWDM with 1561nm optics
.. data:: ETHERNET_40GBASE_LR4 = 75
fiber over 4 lane optics (long reach)
.. data:: ETHERNET_40GBASE_ER4 = 76
fiber over 4 lane optics (extended reach)
.. data:: ETHERNET_100GBASE_ER4 = 77
fiber over 4 lane optics (extended reach)
.. data:: ETHERNET_1000BASE_EX = 78
X fiber over 1310nm optics
.. data:: ETHERNET_1000BASE_BX10_D = 79
X fibre (D, 10km)
.. data:: ETHERNET_1000BASE_BX10_U = 80
X fibre (U, 10km)
.. data:: ETHERNET_1000BASE_DWDM_1561_42 = 81
DWDM with 1561.42nm optics
.. data:: ETHERNET_1000BASE_DWDM_1560_61 = 82
DWDM with 1560.61nm optics
.. data:: ETHERNET_1000BASE_DWDM_1559_79 = 83
DWDM with 1559.79nm optics
.. data:: ETHERNET_1000BASE_DWDM_1558_98 = 84
DWDM with 1558.98nm optics
.. data:: ETHERNET_1000BASE_DWDM_1558_17 = 85
DWDM with 1558.17nm optics
.. data:: ETHERNET_1000BASE_DWDM_1557_36 = 86
DWDM with 1557.36nm optics
.. data:: ETHERNET_1000BASE_DWDM_1556_55 = 87
DWDM with 1556.55nm optics
.. data:: ETHERNET_1000BASE_DWDM_1555_75 = 88
DWDM with 1555.75nm optics
.. data:: ETHERNET_1000BASE_DWDM_1554_94 = 89
DWDM with 1554.94nm optics
.. data:: ETHERNET_1000BASE_DWDM_1554_13 = 90
DWDM with 1554.13nm optics
.. data:: ETHERNET_1000BASE_DWDM_1553_33 = 91
DWDM with 1553.33nm optics
.. data:: ETHERNET_1000BASE_DWDM_1552_52 = 92
DWDM with 1552.52nm optics
.. data:: ETHERNET_1000BASE_DWDM_1551_72 = 93
DWDM with 1551.72nm optics
.. data:: ETHERNET_1000BASE_DWDM_1550_92 = 94
DWDM with 1550.92nm optics
.. data:: ETHERNET_1000BASE_DWDM_1550_12 = 95
DWDM with 1550.12nm optics
.. data:: ETHERNET_1000BASE_DWDM_1549_32 = 96
DWDM with 1549.32nm optics
.. data:: ETHERNET_1000BASE_DWDM_1548_51 = 97
DWDM with 1548.51nm optics
.. data:: ETHERNET_1000BASE_DWDM_1547_72 = 98
DWDM with 1547.72nm optics
.. data:: ETHERNET_1000BASE_DWDM_1546_92 = 99
DWDM with 1546.92nm optics
.. data:: ETHERNET_1000BASE_DWDM_1546_12 = 100
DWDM with 1546.12nm optics
.. data:: ETHERNET_1000BASE_DWDM_1545_32 = 101
DWDM with 1545.32nm optics
.. data:: ETHERNET_1000BASE_DWDM_1544_53 = 102
DWDM with 1544.53nm optics
.. data:: ETHERNET_1000BASE_DWDM_1543_73 = 103
DWDM with 1543.73nm optics
.. data:: ETHERNET_1000BASE_DWDM_1542_94 = 104
DWDM with 1542.94nm optics
.. data:: ETHERNET_1000BASE_DWDM_1542_14 = 105
DWDM with 1542.14nm optics
.. data:: ETHERNET_1000BASE_DWDM_1541_35 = 106
DWDM with 1541.35nm optics
.. data:: ETHERNET_1000BASE_DWDM_1540_56 = 107
DWDM with 1540.56nm optics
.. data:: ETHERNET_1000BASE_DWDM_1539_77 = 108
DWDM with 1539.77nm optics
.. data:: ETHERNET_1000BASE_DWDM_1538_98 = 109
DWDM with 1538.98nm optics
.. data:: ETHERNET_1000BASE_DWDM_1538_19 = 110
DWDM with 1538.19nm optics
.. data:: ETHERNET_1000BASE_DWDM_1537_40 = 111
DWDM with 1537.40nm optics
.. data:: ETHERNET_1000BASE_DWDM_1536_61 = 112
DWDM with 1536.61nm optics
.. data:: ETHERNET_1000BASE_DWDM_1535_82 = 113
DWDM with 1535.82nm optics
.. data:: ETHERNET_1000BASE_DWDM_1535_04 = 114
DWDM with 1535.04nm optics
.. data:: ETHERNET_1000BASE_DWDM_1534_25 = 115
DWDM with 1534.25nm optics
.. data:: ETHERNET_1000BASE_DWDM_1533_47 = 116
DWDM with 1533.47nm optics
.. data:: ETHERNET_1000BASE_DWDM_1532_68 = 117
DWDM with 1532.68nm optics
.. data:: ETHERNET_1000BASE_DWDM_1531_90 = 118
DWDM with 1531.90nm optics
.. data:: ETHERNET_1000BASE_DWDM_1531_12 = 119
DWDM with 1531.12nm optics
.. data:: ETHERNET_1000BASE_DWDM_1530_33 = 120
DWDM with 1530.33nm optics
.. data:: ETHERNET_1000BASE_DWDM_TUNABLE = 121
DWDM with tunable optics
.. data:: ETHERNET_10GBASE_DWDM_1561_42 = 122
DWDM with 1561.42nm optics
.. data:: ETHERNET_10GBASE_DWDM_1560_61 = 123
DWDM with 1560.61nm optics
.. data:: ETHERNET_10GBASE_DWDM_1559_79 = 124
DWDM with 1559.79nm optics
.. data:: ETHERNET_10GBASE_DWDM_1558_98 = 125
DWDM with 1558.98nm optics
.. data:: ETHERNET_10GBASE_DWDM_1558_17 = 126
DWDM with 1558.17nm optics
.. data:: ETHERNET_10GBASE_DWDM_1557_36 = 127
DWDM with 1557.36nm optics
.. data:: ETHERNET_10GBASE_DWDM_1556_55 = 128
DWDM with 1556.55nm optics
.. data:: ETHERNET_10GBASE_DWDM_1555_75 = 129
DWDM with 1555.75nm optics
.. data:: ETHERNET_10GBASE_DWDM_1554_94 = 130
DWDM with 1554.94nm optics
.. data:: ETHERNET_10GBASE_DWDM_1554_13 = 131
DWDM with 1554.13nm optics
.. data:: ETHERNET_10GBASE_DWDM_1553_33 = 132
DWDM with 1553.33nm optics
.. data:: ETHERNET_10GBASE_DWDM_1552_52 = 133
DWDM with 1552.52nm optics
.. data:: ETHERNET_10GBASE_DWDM_1551_72 = 134
DWDM with 1551.72nm optics
.. data:: ETHERNET_10GBASE_DWDM_1550_92 = 135
DWDM with 1550.92nm optics
.. data:: ETHERNET_10GBASE_DWDM_1550_12 = 136
DWDM with 1550.12nm optics
.. data:: ETHERNET_10GBASE_DWDM_1549_32 = 137
DWDM with 1549.32nm optics
.. data:: ETHERNET_10GBASE_DWDM_1548_51 = 138
DWDM with 1548.51nm optics
.. data:: ETHERNET_10GBASE_DWDM_1547_72 = 139
DWDM with 1547.72nm optics
.. data:: ETHERNET_10GBASE_DWDM_1546_92 = 140
DWDM with 1546.92nm optics
.. data:: ETHERNET_10GBASE_DWDM_1546_12 = 141
DWDM with 1546.12nm optics
.. data:: ETHERNET_10GBASE_DWDM_1545_32 = 142
DWDM with 1545.32nm optics
.. data:: ETHERNET_10GBASE_DWDM_1544_53 = 143
DWDM with 1544.53nm optics
.. data:: ETHERNET_10GBASE_DWDM_1543_73 = 144
DWDM with 1543.73nm optics
.. data:: ETHERNET_10GBASE_DWDM_1542_94 = 145
DWDM with 1542.94nm optics
.. data:: ETHERNET_10GBASE_DWDM_1542_14 = 146
DWDM with 1542.14nm optics
.. data:: ETHERNET_10GBASE_DWDM_1541_35 = 147
DWDM with 1541.35nm optics
.. data:: ETHERNET_10GBASE_DWDM_1540_56 = 148
DWDM with 1540.56nm optics
.. data:: ETHERNET_10GBASE_DWDM_1539_77 = 149
DWDM with 1539.77nm optics
.. data:: ETHERNET_10GBASE_DWDM_1538_98 = 150
DWDM with 1538.98nm optics
.. data:: ETHERNET_10GBASE_DWDM_1538_19 = 151
DWDM with 1538.19nm optics
.. data:: ETHERNET_10GBASE_DWDM_1537_40 = 152
DWDM with 1537.40nm optics
.. data:: ETHERNET_10GBASE_DWDM_1536_61 = 153
DWDM with 1536.61nm optics
.. data:: ETHERNET_10GBASE_DWDM_1535_82 = 154
DWDM with 1535.82nm optics
.. data:: ETHERNET_10GBASE_DWDM_1535_04 = 155
DWDM with 1535.04nm optics
.. data:: ETHERNET_10GBASE_DWDM_1534_25 = 156
DWDM with 1534.25nm optics
.. data:: ETHERNET_10GBASE_DWDM_1533_47 = 157
DWDM with 1533.47nm optics
.. data:: ETHERNET_10GBASE_DWDM_1532_68 = 158
DWDM with 1532.68nm optics
.. data:: ETHERNET_10GBASE_DWDM_1531_90 = 159
DWDM with 1531.90nm optics
.. data:: ETHERNET_10GBASE_DWDM_1531_12 = 160
DWDM with 1531.12nm optics
.. data:: ETHERNET_10GBASE_DWDM_1530_33 = 161
DWDM with 1530.33nm optics
.. data:: ETHERNET_10GBASE_DWDM_TUNABLE = 162
DWDM with tunable optics
.. data:: ETHERNET_40GBASE_DWDM_1561_42 = 163
DWDM with 1561.42nm optics
.. data:: ETHERNET_40GBASE_DWDM_1560_61 = 164
DWDM with 1560.61nm optics
.. data:: ETHERNET_40GBASE_DWDM_1559_79 = 165
DWDM with 1559.79nm optics
.. data:: ETHERNET_40GBASE_DWDM_1558_98 = 166
DWDM with 1558.98nm optics
.. data:: ETHERNET_40GBASE_DWDM_1558_17 = 167
DWDM with 1558.17nm optics
.. data:: ETHERNET_40GBASE_DWDM_1557_36 = 168
DWDM with 1557.36nm optics
.. data:: ETHERNET_40GBASE_DWDM_1556_55 = 169
DWDM with 1556.55nm optics
.. data:: ETHERNET_40GBASE_DWDM_1555_75 = 170
DWDM with 1555.75nm optics
.. data:: ETHERNET_40GBASE_DWDM_1554_94 = 171
DWDM with 1554.94nm optics
.. data:: ETHERNET_40GBASE_DWDM_1554_13 = 172
DWDM with 1554.13nm optics
.. data:: ETHERNET_40GBASE_DWDM_1553_33 = 173
DWDM with 1553.33nm optics
.. data:: ETHERNET_40GBASE_DWDM_1552_52 = 174
DWDM with 1552.52nm optics
.. data:: ETHERNET_40GBASE_DWDM_1551_72 = 175
DWDM with 1551.72nm optics
.. data:: ETHERNET_40GBASE_DWDM_1550_92 = 176
DWDM with 1550.92nm optics
.. data:: ETHERNET_40GBASE_DWDM_1550_12 = 177
DWDM with 1550.12nm optics
.. data:: ETHERNET_40GBASE_DWDM_1549_32 = 178
DWDM with 1549.32nm optics
.. data:: ETHERNET_40GBASE_DWDM_1548_51 = 179
DWDM with 1548.51nm optics
.. data:: ETHERNET_40GBASE_DWDM_1547_72 = 180
DWDM with 1547.72nm optics
.. data:: ETHERNET_40GBASE_DWDM_1546_92 = 181
DWDM with 1546.92nm optics
.. data:: ETHERNET_40GBASE_DWDM_1546_12 = 182
DWDM with 1546.12nm optics
.. data:: ETHERNET_40GBASE_DWDM_1545_32 = 183
DWDM with 1545.32nm optics
.. data:: ETHERNET_40GBASE_DWDM_1544_53 = 184
DWDM with 1544.53nm optics
.. data:: ETHERNET_40GBASE_DWDM_1543_73 = 185
DWDM with 1543.73nm optics
.. data:: ETHERNET_40GBASE_DWDM_1542_94 = 186
DWDM with 1542.94nm optics
.. data:: ETHERNET_40GBASE_DWDM_1542_14 = 187
DWDM with 1542.14nm optics
.. data:: ETHERNET_40GBASE_DWDM_1541_35 = 188
DWDM with 1541.35nm optics
.. data:: ETHERNET_40GBASE_DWDM_1540_56 = 189
DWDM with 1540.56nm optics
.. data:: ETHERNET_40GBASE_DWDM_1539_77 = 190
DWDM with 1539.77nm optics
.. data:: ETHERNET_40GBASE_DWDM_1538_98 = 191
DWDM with 1538.98nm optics
.. data:: ETHERNET_40GBASE_DWDM_1538_19 = 192
DWDM with 1538.19nm optics
.. data:: ETHERNET_40GBASE_DWDM_1537_40 = 193
DWDM with 1537.40nm optics
.. data:: ETHERNET_40GBASE_DWDM_1536_61 = 194
DWDM with 1536.61nm optics
.. data:: ETHERNET_40GBASE_DWDM_1535_82 = 195
DWDM with 1535.82nm optics
.. data:: ETHERNET_40GBASE_DWDM_1535_04 = 196
DWDM with 1535.04nm optics
.. data:: ETHERNET_40GBASE_DWDM_1534_25 = 197
DWDM with 1534.25nm optics
.. data:: ETHERNET_40GBASE_DWDM_1533_47 = 198
DWDM with 1533.47nm optics
.. data:: ETHERNET_40GBASE_DWDM_1532_68 = 199
DWDM with 1532.68nm optics
.. data:: ETHERNET_40GBASE_DWDM_1531_90 = 200
DWDM with 1531.90nm optics
.. data:: ETHERNET_40GBASE_DWDM_1531_12 = 201
DWDM with 1531.12nm optics
.. data:: ETHERNET_40GBASE_DWDM_1530_33 = 202
DWDM with 1530.33nm optics
.. data:: ETHERNET_40GBASE_DWDM_TUNABLE = 203
DWDM with tunable optics
.. data:: ETHERNET_100GBASE_DWDM_1561_42 = 204
DWDM with 1561.42nm optics
.. data:: ETHERNET_100GBASE_DWDM_1560_61 = 205
DWDM with 1560.61nm optics
.. data:: ETHERNET_100GBASE_DWDM_1559_79 = 206
DWDM with 1559.79nm optics
.. data:: ETHERNET_100GBASE_DWDM_1558_98 = 207
DWDM with 1558.98nm optics
.. data:: ETHERNET_100GBASE_DWDM_1558_17 = 208
DWDM with 1558.17nm optics
.. data:: ETHERNET_100GBASE_DWDM_1557_36 = 209
DWDM with 1557.36nm optics
.. data:: ETHERNET_100GBASE_DWDM_1556_55 = 210
DWDM with 1556.55nm optics
.. data:: ETHERNET_100GBASE_DWDM_1555_75 = 211
DWDM with 1555.75nm optics
.. data:: ETHERNET_100GBASE_DWDM_1554_94 = 212
DWDM with 1554.94nm optics
.. data:: ETHERNET_100GBASE_DWDM_1554_13 = 213
DWDM with 1554.13nm optics
.. data:: ETHERNET_100GBASE_DWDM_1553_33 = 214
DWDM with 1553.33nm optics
.. data:: ETHERNET_100GBASE_DWDM_1552_52 = 215
DWDM with 1552.52nm optics
.. data:: ETHERNET_100GBASE_DWDM_1551_72 = 216
DWDM with 1551.72nm optics
.. data:: ETHERNET_100GBASE_DWDM_1550_92 = 217
DWDM with 1550.92nm optics
.. data:: ETHERNET_100GBASE_DWDM_1550_12 = 218
DWDM with 1550.12nm optics
.. data:: ETHERNET_100GBASE_DWDM_1549_32 = 219
DWDM with 1549.32nm optics
.. data:: ETHERNET_100GBASE_DWDM_1548_51 = 220
DWDM with 1548.51nm optics
.. data:: ETHERNET_100GBASE_DWDM_1547_72 = 221
DWDM with 1547.72nm optics
.. data:: ETHERNET_100GBASE_DWDM_1546_92 = 222
DWDM with 1546.92nm optics
.. data:: ETHERNET_100GBASE_DWDM_1546_12 = 223
DWDM with 1546.12nm optics
.. data:: ETHERNET_100GBASE_DWDM_1545_32 = 224
DWDM with 1545.32nm optics
.. data:: ETHERNET_100GBASE_DWDM_1544_53 = 225
DWDM with 1544.53nm optics
.. data:: ETHERNET_100GBASE_DWDM_1543_73 = 226
DWDM with 1543.73nm optics
.. data:: ETHERNET_100GBASE_DWDM_1542_94 = 227
DWDM with 1542.94nm optics
.. data:: ETHERNET_100GBASE_DWDM_1542_14 = 228
DWDM with 1542.14nm optics
.. data:: ETHERNET_100GBASE_DWDM_1541_35 = 229
DWDM with 1541.35nm optics
.. data:: ETHERNET_100GBASE_DWDM_1540_56 = 230
DWDM with 1540.56nm optics
.. data:: ETHERNET_100GBASE_DWDM_1539_77 = 231
DWDM with 1539.77nm optics
.. data:: ETHERNET_100GBASE_DWDM_1538_98 = 232
DWDM with 1538.98nm optics
.. data:: ETHERNET_100GBASE_DWDM_1538_19 = 233
DWDM with 1538.19nm optics
.. data:: ETHERNET_100GBASE_DWDM_1537_40 = 234
DWDM with 1537.40nm optics
.. data:: ETHERNET_100GBASE_DWDM_1536_61 = 235
DWDM with 1536.61nm optics
.. data:: ETHERNET_100GBASE_DWDM_1535_82 = 236
DWDM with 1535.82nm optics
.. data:: ETHERNET_100GBASE_DWDM_1535_04 = 237
DWDM with 1535.04nm optics
.. data:: ETHERNET_100GBASE_DWDM_1534_25 = 238
DWDM with 1534.25nm optics
.. data:: ETHERNET_100GBASE_DWDM_1533_47 = 239
DWDM with 1533.47nm optics
.. data:: ETHERNET_100GBASE_DWDM_1532_68 = 240
DWDM with 1532.68nm optics
.. data:: ETHERNET_100GBASE_DWDM_1531_90 = 241
DWDM with 1531.90nm optics
.. data:: ETHERNET_100GBASE_DWDM_1531_12 = 242
DWDM with 1531.12nm optics
.. data:: ETHERNET_100GBASE_DWDM_1530_33 = 243
DWDM with 1530.33nm optics
.. data:: ETHERNET_100GBASE_DWDM_TUNABLE = 244
DWDM with tunable optics
.. data:: ETHERNET_40GBASE_KR4 = 245
4 lane copper (backplane)
.. data:: ETHERNET_40GBASE_CR4 = 246
4 lane copper (very short reach)
.. data:: ETHERNET_40GBASE_SR4 = 247
fiber over 4 lane optics (short reach)
.. data:: ETHERNET_40GBASE_FR = 248
serial fiber (2+ km)
.. data:: ETHERNET_100GBASE_CR10 = 249
10 lane copper (very short reach)
.. data:: ETHERNET_100GBASE_SR10 = 250
MMF fiber over 10 lane optics (short reach)
.. data:: ETHERNET_40GBASE_CSR4 = 251
fiber over 4 lane optics (extended short reach)
.. data:: ETHERNET_10GBASE_CWDM = 252
CWDM optics
.. data:: ETHERNET_10GBASE_CWDM_TUNABLE = 253
CWDM with tunable optics
.. data:: ETHERNET_10GBASE_CWDM_1470 = 254
CWDM with 1470nm optics
.. data:: ETHERNET_10GBASE_CWDM_1490 = 255
CWDM with 1490nm optics
.. data:: ETHERNET_10GBASE_CWDM_1510 = 256
CWDM with 1510nm optics
.. data:: ETHERNET_10GBASE_CWDM_1530 = 257
CWDM with 1530nm optics
.. data:: ETHERNET_10GBASE_CWDM_1550 = 258
CWDM with 1550nm optics
.. data:: ETHERNET_10GBASE_CWDM_1570 = 259
CWDM with 1570nm optics
.. data:: ETHERNET_10GBASE_CWDM_1590 = 260
CWDM with 1590nm optics
.. data:: ETHERNET_10GBASE_CWDM_1610 = 261
CWDM with 1610nm optics
.. data:: ETHERNET_40GBASE_CWDM = 262
CWDM optics
.. data:: ETHERNET_40GBASE_CWDM_TUNABLE = 263
CWDM with tunable optics
.. data:: ETHERNET_40GBASE_CWDM_1470 = 264
CWDM with 1470nm optics
.. data:: ETHERNET_40GBASE_CWDM_1490 = 265
CWDM with 1490nm optics
.. data:: ETHERNET_40GBASE_CWDM_1510 = 266
CWDM with 1510nm optics
.. data:: ETHERNET_40GBASE_CWDM_1530 = 267
CWDM with 1530nm optics
.. data:: ETHERNET_40GBASE_CWDM_1550 = 268
CWDM with 1550nm optics
.. data:: ETHERNET_40GBASE_CWDM_1570 = 269
CWDM with 1570nm optics
.. data:: ETHERNET_40GBASE_CWDM_1590 = 270
CWDM with 1590nm optics
.. data:: ETHERNET_40GBASE_CWDM_1610 = 271
CWDM with 1610nm optics
.. data:: ETHERNET_100GBASE_CWDM = 272
CWDM optics
.. data:: ETHERNET_100GBASE_CWDM_TUNABLE = 273
CWDM with tunable optics
.. data:: ETHERNET_100GBASE_CWDM_1470 = 274
CWDM with 1470nm optics
.. data:: ETHERNET_100GBASE_CWDM_1490 = 275
CWDM with 1490nm optics
.. data:: ETHERNET_100GBASE_CWDM_1510 = 276
CWDM with 1510nm optics
.. data:: ETHERNET_100GBASE_CWDM_1530 = 277
CWDM with 1530nm optics
.. data:: ETHERNET_100GBASE_CWDM_1550 = 278
CWDM with 1550nm optics
.. data:: ETHERNET_100GBASE_CWDM_1570 = 279
CWDM with 1570nm optics
.. data:: ETHERNET_100GBASE_CWDM_1590 = 280
CWDM with 1590nm optics
.. data:: ETHERNET_100GBASE_CWDM_1610 = 281
CWDM with 1610nm optics
.. data:: ETHERNET_40GBASE_ELPB = 282
Electrical loopback
.. data:: ETHERNET_100GBASE_ELPB = 283
Electrical loopback
.. data:: ETHERNET_100GBASE_LR10 = 284
Fiber over 10 lane optics (long reach)
.. data:: ETHERNET_40GBASE = 285
Four-pair Category 8 STP
.. data:: ETHERNET_100GBASE_KP4 = 286
4 lane copper (backplane)
.. data:: ETHERNET_100GBASE_KR4 = 287
Improved 4 lane copper (backplane)
.. data:: ETHERNET_10GBASE_LRM = 288
Multimode fiber with 1310nm optics (long reach)
.. data:: ETHERNET_10GBASE_CX4 = 289
4 lane X copper
.. data:: ETHERNET_10GBASE = 290
Four-pair Category 6+ UTP
.. data:: ETHERNET_10GBASE_KX4 = 291
4 lane X copper (backplane)
.. data:: ETHERNET_10GBASE_KR = 292
Copper (backplane)
.. data:: ETHERNET_10GBASE_PR = 293
Passive optical network
.. data:: ETHERNET_100BASE_LX = 294
X fiber over 4 lane 1310nm optics
.. data:: ETHERNET_100BASE_ZX = 295
Single-mode fiber over 1550nm optics (Cisco)
.. data:: ETHERNET_1000BASE_BX_D = 296
X fibre (D)
.. data:: ETHERNET_1000BASE_BX_U = 297
X fibre (U)
.. data:: ETHERNET_1000BASE_BX20_D = 298
X fibre (D, 20km)
.. data:: ETHERNET_1000BASE_BX20_U = 299
X fibre (U, 20km)
.. data:: ETHERNET_1000BASE_BX40_D = 300
X fibre (D, 40km)
.. data:: ETHERNET_1000BASE_BX40_DA = 301
X fibre (D, 40km)
.. data:: ETHERNET_1000BASE_BX40_U = 302
X fibre (U, 40km)
.. data:: ETHERNET_1000BASE_BX80_D = 303
X fibre (D, 80km)
.. data:: ETHERNET_1000BASE_BX80_U = 304
X fibre (U, 80km)
.. data:: ETHERNET_1000BASE_BX120_D = 305
X fibre (D, 120km)
.. data:: ETHERNET_1000BASE_BX120_U = 306
X fibre (U, 120km)
.. data:: ETHERNET_10GBASE_BX_D = 307
X fibre (D)
.. data:: ETHERNET_10GBASE_BX_U = 308
X fibre (U)
.. data:: ETHERNET_10GBASE_BX10_D = 309
X fibre (D, 10km)
.. data:: ETHERNET_10GBASE_BX10_U = 310
X fibre (U, 10km)
.. data:: ETHERNET_10GBASE_BX20_D = 311
X fibre (D, 20km)
.. data:: ETHERNET_10GBASE_BX20_U = 312
X fibre (U, 20km)
.. data:: ETHERNET_10GBASE_BX40_D = 313
X fibre (D, 40km)
.. data:: ETHERNET_10GBASE_BX40_U = 314
X fibre (U, 40km)
.. data:: ETHERNET_10GBASE_BX80_D = 315
X fibre (D, 80km)
.. data:: ETHERNET_10GBASE_BX80_U = 316
X fibre (U, 80km)
.. data:: ETHERNET_10GBASE_BX120_D = 317
X fibre (D, 120km)
.. data:: ETHERNET_10GBASE_BX120_U = 318
X fibre (U, 120km)
.. data:: ETHERNET_1000BASE_DR_LX = 319
X fiber over long-wl laser PMD, duplexity
unknown, dual rate
.. data:: ETHERNET_100GBASE_ER4L = 320
fiber over 4 lane optics (25km reach)
.. data:: ETHERNET_100GBASE_SR4 = 321
fiber over 4 lane optics (short reach)
.. data:: ETHERNET_40GBASE_SR_BD = 322
Bi-directional fiber over 2 lane optics (short
reach)
.. data:: ETHERNET_BASE_MAX = 323
ethernet base max
"""
ETHERNET_OTHER = 0
ETHERNET_UNKNOWN = 1
ETHERNET_AUI = 2
ETHERNET_10BASE5 = 3
ETHERNET_FOIRL = 4
ETHERNET_10BASE2 = 5
ETHERNET_10BROAD36 = 6
ETHERNET_10BASE = 7
ETHERNET_10BASE_THD = 8
ETHERNET_10BASE_TFD = 9
ETHERNET_10BASE_FP = 10
ETHERNET_10BASE_FB = 11
ETHERNET_10BASE_FL = 12
ETHERNET_10BASE_FLHD = 13
ETHERNET_10BASE_FLFD = 14
ETHERNET_100BASE_T4 = 15
ETHERNET_100BASE_TX = 16
ETHERNET_100BASE_TXHD = 17
ETHERNET_100BASE_TXFD = 18
ETHERNET_100BASE_FX = 19
ETHERNET_100BASE_FXHD = 20
ETHERNET_100BASE_FXFD = 21
ETHERNET_100BASE_EX = 22
ETHERNET_100BASE_EXHD = 23
ETHERNET_100BASE_EXFD = 24
ETHERNET_100BASE_T2 = 25
ETHERNET_100BASE_T2HD = 26
ETHERNET_100BASE_T2FD = 27
ETHERNET_1000BASE_X = 28
ETHERNET_1000BASE_XHD = 29
ETHERNET_1000BASE_XFD = 30
ETHERNET_1000BASE_LX = 31
ETHERNET_1000BASE_LXHD = 32
ETHERNET_1000BASE_LXFD = 33
ETHERNET_1000BASE_SX = 34
ETHERNET_1000BASE_SXHD = 35
ETHERNET_1000BASE_SXFD = 36
ETHERNET_1000BASE_CX = 37
ETHERNET_1000BASE_CXHD = 38
ETHERNET_1000BASE_CXFD = 39
ETHERNET_1000BASE = 40
ETHERNET_1000BASE_THD = 41
ETHERNET_1000BASE_TFD = 42
ETHERNET_10GBASE_X = 43
ETHERNET_10GBASE_LX4 = 44
ETHERNET_10GBASE_R = 45
ETHERNET_10GBASE_ER = 46
ETHERNET_10GBASE_LR = 47
ETHERNET_10GBASE_SR = 48
ETHERNET_10GBASE_W = 49
ETHERNET_10GBASE_EW = 50
ETHERNET_10GBASE_LW = 51
ETHERNET_10GBASE_SW = 52
ETHERNET_1000BASE_ZX = 53
ETHERNET_1000BASE_CWDM = 54
ETHERNET_1000BASE_CWDM_1470 = 55
ETHERNET_1000BASE_CWDM_1490 = 56
ETHERNET_1000BASE_CWDM_1510 = 57
ETHERNET_1000BASE_CWDM_1530 = 58
ETHERNET_1000BASE_CWDM_1550 = 59
ETHERNET_1000BASE_CWDM_1570 = 60
ETHERNET_1000BASE_CWDM_1590 = 61
ETHERNET_1000BASE_CWDM_1610 = 62
ETHERNET_10GBASE_ZR = 63
ETHERNET_10GBASE_DWDM = 64
ETHERNET_100GBASE_LR4 = 65
ETHERNET_1000BASE_DWDM = 66
ETHERNET_1000BASE_DWDM_1533 = 67
ETHERNET_1000BASE_DWDM_1537 = 68
ETHERNET_1000BASE_DWDM_1541 = 69
ETHERNET_1000BASE_DWDM_1545 = 70
ETHERNET_1000BASE_DWDM_1549 = 71
ETHERNET_1000BASE_DWDM_1553 = 72
ETHERNET_1000BASE_DWDM_1557 = 73
ETHERNET_1000BASE_DWDM_1561 = 74
ETHERNET_40GBASE_LR4 = 75
ETHERNET_40GBASE_ER4 = 76
ETHERNET_100GBASE_ER4 = 77
ETHERNET_1000BASE_EX = 78
ETHERNET_1000BASE_BX10_D = 79
ETHERNET_1000BASE_BX10_U = 80
ETHERNET_1000BASE_DWDM_1561_42 = 81
ETHERNET_1000BASE_DWDM_1560_61 = 82
ETHERNET_1000BASE_DWDM_1559_79 = 83
ETHERNET_1000BASE_DWDM_1558_98 = 84
ETHERNET_1000BASE_DWDM_1558_17 = 85
ETHERNET_1000BASE_DWDM_1557_36 = 86
ETHERNET_1000BASE_DWDM_1556_55 = 87
ETHERNET_1000BASE_DWDM_1555_75 = 88
ETHERNET_1000BASE_DWDM_1554_94 = 89
ETHERNET_1000BASE_DWDM_1554_13 = 90
ETHERNET_1000BASE_DWDM_1553_33 = 91
ETHERNET_1000BASE_DWDM_1552_52 = 92
ETHERNET_1000BASE_DWDM_1551_72 = 93
ETHERNET_1000BASE_DWDM_1550_92 = 94
ETHERNET_1000BASE_DWDM_1550_12 = 95
ETHERNET_1000BASE_DWDM_1549_32 = 96
ETHERNET_1000BASE_DWDM_1548_51 = 97
ETHERNET_1000BASE_DWDM_1547_72 = 98
ETHERNET_1000BASE_DWDM_1546_92 = 99
ETHERNET_1000BASE_DWDM_1546_12 = 100
ETHERNET_1000BASE_DWDM_1545_32 = 101
ETHERNET_1000BASE_DWDM_1544_53 = 102
ETHERNET_1000BASE_DWDM_1543_73 = 103
ETHERNET_1000BASE_DWDM_1542_94 = 104
ETHERNET_1000BASE_DWDM_1542_14 = 105
ETHERNET_1000BASE_DWDM_1541_35 = 106
ETHERNET_1000BASE_DWDM_1540_56 = 107
ETHERNET_1000BASE_DWDM_1539_77 = 108
ETHERNET_1000BASE_DWDM_1538_98 = 109
ETHERNET_1000BASE_DWDM_1538_19 = 110
ETHERNET_1000BASE_DWDM_1537_40 = 111
ETHERNET_1000BASE_DWDM_1536_61 = 112
ETHERNET_1000BASE_DWDM_1535_82 = 113
ETHERNET_1000BASE_DWDM_1535_04 = 114
ETHERNET_1000BASE_DWDM_1534_25 = 115
ETHERNET_1000BASE_DWDM_1533_47 = 116
ETHERNET_1000BASE_DWDM_1532_68 = 117
ETHERNET_1000BASE_DWDM_1531_90 = 118
ETHERNET_1000BASE_DWDM_1531_12 = 119
ETHERNET_1000BASE_DWDM_1530_33 = 120
ETHERNET_1000BASE_DWDM_TUNABLE = 121
ETHERNET_10GBASE_DWDM_1561_42 = 122
ETHERNET_10GBASE_DWDM_1560_61 = 123
ETHERNET_10GBASE_DWDM_1559_79 = 124
ETHERNET_10GBASE_DWDM_1558_98 = 125
ETHERNET_10GBASE_DWDM_1558_17 = 126
ETHERNET_10GBASE_DWDM_1557_36 = 127
ETHERNET_10GBASE_DWDM_1556_55 = 128
ETHERNET_10GBASE_DWDM_1555_75 = 129
ETHERNET_10GBASE_DWDM_1554_94 = 130
ETHERNET_10GBASE_DWDM_1554_13 = 131
ETHERNET_10GBASE_DWDM_1553_33 = 132
ETHERNET_10GBASE_DWDM_1552_52 = 133
ETHERNET_10GBASE_DWDM_1551_72 = 134
ETHERNET_10GBASE_DWDM_1550_92 = 135
ETHERNET_10GBASE_DWDM_1550_12 = 136
ETHERNET_10GBASE_DWDM_1549_32 = 137
ETHERNET_10GBASE_DWDM_1548_51 = 138
ETHERNET_10GBASE_DWDM_1547_72 = 139
ETHERNET_10GBASE_DWDM_1546_92 = 140
ETHERNET_10GBASE_DWDM_1546_12 = 141
ETHERNET_10GBASE_DWDM_1545_32 = 142
ETHERNET_10GBASE_DWDM_1544_53 = 143
ETHERNET_10GBASE_DWDM_1543_73 = 144
ETHERNET_10GBASE_DWDM_1542_94 = 145
ETHERNET_10GBASE_DWDM_1542_14 = 146
ETHERNET_10GBASE_DWDM_1541_35 = 147
ETHERNET_10GBASE_DWDM_1540_56 = 148
ETHERNET_10GBASE_DWDM_1539_77 = 149
ETHERNET_10GBASE_DWDM_1538_98 = 150
ETHERNET_10GBASE_DWDM_1538_19 = 151
ETHERNET_10GBASE_DWDM_1537_40 = 152
ETHERNET_10GBASE_DWDM_1536_61 = 153
ETHERNET_10GBASE_DWDM_1535_82 = 154
ETHERNET_10GBASE_DWDM_1535_04 = 155
ETHERNET_10GBASE_DWDM_1534_25 = 156
ETHERNET_10GBASE_DWDM_1533_47 = 157
ETHERNET_10GBASE_DWDM_1532_68 = 158
ETHERNET_10GBASE_DWDM_1531_90 = 159
ETHERNET_10GBASE_DWDM_1531_12 = 160
ETHERNET_10GBASE_DWDM_1530_33 = 161
ETHERNET_10GBASE_DWDM_TUNABLE = 162
ETHERNET_40GBASE_DWDM_1561_42 = 163
ETHERNET_40GBASE_DWDM_1560_61 = 164
ETHERNET_40GBASE_DWDM_1559_79 = 165
ETHERNET_40GBASE_DWDM_1558_98 = 166
ETHERNET_40GBASE_DWDM_1558_17 = 167
ETHERNET_40GBASE_DWDM_1557_36 = 168
ETHERNET_40GBASE_DWDM_1556_55 = 169
ETHERNET_40GBASE_DWDM_1555_75 = 170
ETHERNET_40GBASE_DWDM_1554_94 = 171
ETHERNET_40GBASE_DWDM_1554_13 = 172
ETHERNET_40GBASE_DWDM_1553_33 = 173
ETHERNET_40GBASE_DWDM_1552_52 = 174
ETHERNET_40GBASE_DWDM_1551_72 = 175
ETHERNET_40GBASE_DWDM_1550_92 = 176
ETHERNET_40GBASE_DWDM_1550_12 = 177
ETHERNET_40GBASE_DWDM_1549_32 = 178
ETHERNET_40GBASE_DWDM_1548_51 = 179
ETHERNET_40GBASE_DWDM_1547_72 = 180
ETHERNET_40GBASE_DWDM_1546_92 = 181
ETHERNET_40GBASE_DWDM_1546_12 = 182
ETHERNET_40GBASE_DWDM_1545_32 = 183
ETHERNET_40GBASE_DWDM_1544_53 = 184
ETHERNET_40GBASE_DWDM_1543_73 = 185
ETHERNET_40GBASE_DWDM_1542_94 = 186
ETHERNET_40GBASE_DWDM_1542_14 = 187
ETHERNET_40GBASE_DWDM_1541_35 = 188
ETHERNET_40GBASE_DWDM_1540_56 = 189
ETHERNET_40GBASE_DWDM_1539_77 = 190
ETHERNET_40GBASE_DWDM_1538_98 = 191
ETHERNET_40GBASE_DWDM_1538_19 = 192
ETHERNET_40GBASE_DWDM_1537_40 = 193
ETHERNET_40GBASE_DWDM_1536_61 = 194
ETHERNET_40GBASE_DWDM_1535_82 = 195
ETHERNET_40GBASE_DWDM_1535_04 = 196
ETHERNET_40GBASE_DWDM_1534_25 = 197
ETHERNET_40GBASE_DWDM_1533_47 = 198
ETHERNET_40GBASE_DWDM_1532_68 = 199
ETHERNET_40GBASE_DWDM_1531_90 = 200
ETHERNET_40GBASE_DWDM_1531_12 = 201
ETHERNET_40GBASE_DWDM_1530_33 = 202
ETHERNET_40GBASE_DWDM_TUNABLE = 203
ETHERNET_100GBASE_DWDM_1561_42 = 204
ETHERNET_100GBASE_DWDM_1560_61 = 205
ETHERNET_100GBASE_DWDM_1559_79 = 206
ETHERNET_100GBASE_DWDM_1558_98 = 207
ETHERNET_100GBASE_DWDM_1558_17 = 208
ETHERNET_100GBASE_DWDM_1557_36 = 209
ETHERNET_100GBASE_DWDM_1556_55 = 210
ETHERNET_100GBASE_DWDM_1555_75 = 211
ETHERNET_100GBASE_DWDM_1554_94 = 212
ETHERNET_100GBASE_DWDM_1554_13 = 213
ETHERNET_100GBASE_DWDM_1553_33 = 214
ETHERNET_100GBASE_DWDM_1552_52 = 215
ETHERNET_100GBASE_DWDM_1551_72 = 216
ETHERNET_100GBASE_DWDM_1550_92 = 217
ETHERNET_100GBASE_DWDM_1550_12 = 218
ETHERNET_100GBASE_DWDM_1549_32 = 219
ETHERNET_100GBASE_DWDM_1548_51 = 220
ETHERNET_100GBASE_DWDM_1547_72 = 221
ETHERNET_100GBASE_DWDM_1546_92 = 222
ETHERNET_100GBASE_DWDM_1546_12 = 223
ETHERNET_100GBASE_DWDM_1545_32 = 224
ETHERNET_100GBASE_DWDM_1544_53 = 225
ETHERNET_100GBASE_DWDM_1543_73 = 226
ETHERNET_100GBASE_DWDM_1542_94 = 227
ETHERNET_100GBASE_DWDM_1542_14 = 228
ETHERNET_100GBASE_DWDM_1541_35 = 229
ETHERNET_100GBASE_DWDM_1540_56 = 230
ETHERNET_100GBASE_DWDM_1539_77 = 231
ETHERNET_100GBASE_DWDM_1538_98 = 232
ETHERNET_100GBASE_DWDM_1538_19 = 233
ETHERNET_100GBASE_DWDM_1537_40 = 234
ETHERNET_100GBASE_DWDM_1536_61 = 235
ETHERNET_100GBASE_DWDM_1535_82 = 236
ETHERNET_100GBASE_DWDM_1535_04 = 237
ETHERNET_100GBASE_DWDM_1534_25 = 238
ETHERNET_100GBASE_DWDM_1533_47 = 239
ETHERNET_100GBASE_DWDM_1532_68 = 240
ETHERNET_100GBASE_DWDM_1531_90 = 241
ETHERNET_100GBASE_DWDM_1531_12 = 242
ETHERNET_100GBASE_DWDM_1530_33 = 243
ETHERNET_100GBASE_DWDM_TUNABLE = 244
ETHERNET_40GBASE_KR4 = 245
ETHERNET_40GBASE_CR4 = 246
ETHERNET_40GBASE_SR4 = 247
ETHERNET_40GBASE_FR = 248
ETHERNET_100GBASE_CR10 = 249
ETHERNET_100GBASE_SR10 = 250
ETHERNET_40GBASE_CSR4 = 251
ETHERNET_10GBASE_CWDM = 252
ETHERNET_10GBASE_CWDM_TUNABLE = 253
ETHERNET_10GBASE_CWDM_1470 = 254
ETHERNET_10GBASE_CWDM_1490 = 255
ETHERNET_10GBASE_CWDM_1510 = 256
ETHERNET_10GBASE_CWDM_1530 = 257
ETHERNET_10GBASE_CWDM_1550 = 258
ETHERNET_10GBASE_CWDM_1570 = 259
ETHERNET_10GBASE_CWDM_1590 = 260
ETHERNET_10GBASE_CWDM_1610 = 261
ETHERNET_40GBASE_CWDM = 262
ETHERNET_40GBASE_CWDM_TUNABLE = 263
ETHERNET_40GBASE_CWDM_1470 = 264
ETHERNET_40GBASE_CWDM_1490 = 265
ETHERNET_40GBASE_CWDM_1510 = 266
ETHERNET_40GBASE_CWDM_1530 = 267
ETHERNET_40GBASE_CWDM_1550 = 268
ETHERNET_40GBASE_CWDM_1570 = 269
ETHERNET_40GBASE_CWDM_1590 = 270
ETHERNET_40GBASE_CWDM_1610 = 271
ETHERNET_100GBASE_CWDM = 272
ETHERNET_100GBASE_CWDM_TUNABLE = 273
ETHERNET_100GBASE_CWDM_1470 = 274
ETHERNET_100GBASE_CWDM_1490 = 275
ETHERNET_100GBASE_CWDM_1510 = 276
ETHERNET_100GBASE_CWDM_1530 = 277
ETHERNET_100GBASE_CWDM_1550 = 278
ETHERNET_100GBASE_CWDM_1570 = 279
ETHERNET_100GBASE_CWDM_1590 = 280
ETHERNET_100GBASE_CWDM_1610 = 281
ETHERNET_40GBASE_ELPB = 282
ETHERNET_100GBASE_ELPB = 283
ETHERNET_100GBASE_LR10 = 284
ETHERNET_40GBASE = 285
ETHERNET_100GBASE_KP4 = 286
ETHERNET_100GBASE_KR4 = 287
ETHERNET_10GBASE_LRM = 288
ETHERNET_10GBASE_CX4 = 289
ETHERNET_10GBASE = 290
ETHERNET_10GBASE_KX4 = 291
ETHERNET_10GBASE_KR = 292
ETHERNET_10GBASE_PR = 293
ETHERNET_100BASE_LX = 294
ETHERNET_100BASE_ZX = 295
ETHERNET_1000BASE_BX_D = 296
ETHERNET_1000BASE_BX_U = 297
ETHERNET_1000BASE_BX20_D = 298
ETHERNET_1000BASE_BX20_U = 299
ETHERNET_1000BASE_BX40_D = 300
ETHERNET_1000BASE_BX40_DA = 301
ETHERNET_1000BASE_BX40_U = 302
ETHERNET_1000BASE_BX80_D = 303
ETHERNET_1000BASE_BX80_U = 304
ETHERNET_1000BASE_BX120_D = 305
ETHERNET_1000BASE_BX120_U = 306
ETHERNET_10GBASE_BX_D = 307
ETHERNET_10GBASE_BX_U = 308
ETHERNET_10GBASE_BX10_D = 309
ETHERNET_10GBASE_BX10_U = 310
ETHERNET_10GBASE_BX20_D = 311
ETHERNET_10GBASE_BX20_U = 312
ETHERNET_10GBASE_BX40_D = 313
ETHERNET_10GBASE_BX40_U = 314
ETHERNET_10GBASE_BX80_D = 315
ETHERNET_10GBASE_BX80_U = 316
ETHERNET_10GBASE_BX120_D = 317
ETHERNET_10GBASE_BX120_U = 318
ETHERNET_1000BASE_DR_LX = 319
ETHERNET_100GBASE_ER4L = 320
ETHERNET_100GBASE_SR4 = 321
ETHERNET_40GBASE_SR_BD = 322
ETHERNET_BASE_MAX = 323
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetMediaEnum']
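
# --- Editorial example (not part of the generated bindings) -----------------
# Assuming the ``Enum`` base class used above is Python's standard
# ``enum.Enum`` (as in the ydk-py 0.x generated code), a raw media value can
# be mapped back to its symbolic name, e.g.:
#
#     EthernetMediaEnum(47).name    # 'ETHERNET_10GBASE_LR'
#     EthernetMediaEnum(288).name   # 'ETHERNET_10GBASE_LRM'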
class EthernetPortEnableEnum(Enum):
"""
EthernetPortEnableEnum
Port admin state
.. data:: DISABLED = 0
Port disabled, both directions
.. data:: RX_ENABLED = 1
Port enabled rx direction only
.. data:: TX_ENABLED = 2
Port enabled tx direction only
.. data:: ENABLED = 3
Port enabled, both directions
"""
DISABLED = 0
RX_ENABLED = 1
TX_ENABLED = 2
ENABLED = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetPortEnableEnum']
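
# --- Editorial example (not part of the generated bindings) -----------------
# A minimal sketch showing how the rx/tx enable flags implied by
# EthernetPortEnableEnum (see the class above) can be derived from the
# admin_state leaf of an Interface object; the helper name is an editorial
# invention for illustration only.
def _example_port_enable_flags(admin_state):
    """Return (rx_enabled, tx_enabled) for an EthernetPortEnableEnum value."""
    rx_enabled = admin_state in (EthernetPortEnableEnum.RX_ENABLED,
                                 EthernetPortEnableEnum.ENABLED)
    tx_enabled = admin_state in (EthernetPortEnableEnum.TX_ENABLED,
                                 EthernetPortEnableEnum.ENABLED)
    return rx_enabled, tx_enabled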
class EthernetSpeedEnum(Enum):
"""
EthernetSpeedEnum
Speed
.. data:: ETHERNET_SPEED_INVALID = 0
ethernet speed invalid
.. data:: TEN_MBPS = 1
ten mbps
.. data:: HUNDRED_MBPS = 2
hundred mbps
.. data:: ONE_GBPS = 3
one gbps
.. data:: TEN_GBPS = 4
ten gbps
.. data:: FORTY_GBPS = 5
forty gbps
.. data:: HUNDRED_GBPS = 6
hundred gbps
.. data:: ETHERNET_SPEED_TYPES_COUNT = 7
ethernet speed types count
"""
ETHERNET_SPEED_INVALID = 0
TEN_MBPS = 1
HUNDRED_MBPS = 2
ONE_GBPS = 3
TEN_GBPS = 4
FORTY_GBPS = 5
HUNDRED_GBPS = 6
ETHERNET_SPEED_TYPES_COUNT = 7
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetSpeedEnum']
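
# --- Editorial example (not part of the generated bindings) -----------------
# A small lookup from EthernetSpeedEnum members to megabits per second; the
# numeric rates are inferred from the member names and are included here for
# illustration only.
_EXAMPLE_SPEED_MBPS = {
    EthernetSpeedEnum.TEN_MBPS: 10,
    EthernetSpeedEnum.HUNDRED_MBPS: 100,
    EthernetSpeedEnum.ONE_GBPS: 1000,
    EthernetSpeedEnum.TEN_GBPS: 10000,
    EthernetSpeedEnum.FORTY_GBPS: 40000,
    EthernetSpeedEnum.HUNDRED_GBPS: 100000,
}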
class EthernetInterface(object):
"""
Ethernet operational data
.. attribute:: berts
Ethernet controller BERT table
**type**\: :py:class:`Berts <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Berts>`
.. attribute:: interfaces
Ethernet controller info table
**type**\: :py:class:`Interfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces>`
.. attribute:: statistics
Ethernet controller statistics table
**type**\: :py:class:`Statistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Statistics>`
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.berts = EthernetInterface.Berts()
self.berts.parent = self
self.interfaces = EthernetInterface.Interfaces()
self.interfaces.parent = self
self.statistics = EthernetInterface.Statistics()
self.statistics.parent = self
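
    # --- Editorial sketch (comments only; provider arguments are assumptions) --
    # Reading this operational subtree with ydk-py typically goes through the
    # CRUD service; the exact NetconfServiceProvider arguments below depend on
    # the device and session in use:
    #
    #     from ydk.services import CRUDService
    #     from ydk.providers import NetconfServiceProvider
    #     provider = NetconfServiceProvider(address='10.0.0.1', port=830,
    #                                       username='user', password='pw',
    #                                       protocol='ssh')
    #     eth = CRUDService().read(provider, EthernetInterface())
    #     for stat in eth.statistics.statistic:
    #         print(stat.interface_name, stat.received_total_frames)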
class Statistics(object):
"""
Ethernet controller statistics table
.. attribute:: statistic
Ethernet statistics information
**type**\: list of :py:class:`Statistic <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Statistics.Statistic>`
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.statistic = YList()
self.statistic.parent = self
self.statistic.name = 'statistic'
class Statistic(object):
"""
Ethernet statistics information
.. attribute:: interface_name <key>
The name of the interface
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: aborted_packet_drops
Drops due to packet abort
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: buffer_underrun_packet_drops
Drops due to buffer underrun
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: dropped_ether_stats_fragments
Bad Frames < 64 Octet, dropped
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: dropped_ether_stats_undersize_pkts
Good frames < 64 Octet, dropped
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: dropped_giant_packets_greaterthan_mru
Good frames > MRU, dropped
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: dropped_jabbers_packets_greaterthan_mru
Bad Frames > MRU, dropped
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: dropped_miscellaneous_error_packets
Any other errors not counted
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: dropped_packets_with_crc_align_errors
Frames 64 \- MRU with CRC error
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: ether_stats_collisions
All collision events
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: invalid_dest_mac_drop_packets
Drops due to the destination MAC not matching
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: invalid_encap_drop_packets
Drops due to the encapsulation or ether type not matching
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: miscellaneous_output_errors
Any other errors not counted
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: number_of_aborted_packets_dropped
Drops due to packet abort
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: number_of_buffer_overrun_packets_dropped
Drops due to buffer overrun
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: number_of_miscellaneous_packets_dropped
Any other drops not counted
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: numberof_invalid_vlan_id_packets_dropped
Drops due to invalid VLAN id
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received8021q_frames
All 802.1Q frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_broadcast_frames
Received broadcast Frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_good_bytes
Total octets of all good frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_good_frames
Received Good Frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_multicast_frames
Received multicast Frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_pause_frames
All pause frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_total64_octet_frames
All 64 Octet Frame Count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_total_bytes
Total octets of all frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_total_frames
All frames, good or bad
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_total_octet_frames_from1024_to1518
All 1024\-1518 Octet Frame Count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_total_octet_frames_from128_to255
All 128\-255 Octet Frame Count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_total_octet_frames_from1519_to_max
All > 1518 Octet Frame Count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_total_octet_frames_from256_to511
All 256\-511 Octet Frame Count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_total_octet_frames_from512_to1023
All 512\-1023 Octet Frame Count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_total_octet_frames_from65_to127
All 65\-127 Octet Frame Count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_unicast_frames
Received unicast Frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: received_unknown_opcodes
Unsupported MAC Control frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: rfc2819_ether_stats_crc_align_errors
RFC2819 etherStatsCRCAlignErrors
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: rfc2819_ether_stats_jabbers
RFC2819 etherStatsJabbers
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: rfc2819_ether_stats_oversized_pkts
RFC2819 etherStatsOversizedPkts
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: rfc3635dot3_stats_alignment_errors
RFC3635 dot3StatsAlignmentErrors
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: symbol_errors
Symbol errors
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: total_bytes_transmitted
Total octets of all frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: total_frames_transmitted
All frames, good or bad
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: total_good_bytes_transmitted
Total octets of all good frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: transmitted8021q_frames
All 802.1Q frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: transmitted_broadcast_frames
Transmitted broadcast Frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: transmitted_good_frames
Transmitted Good Frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: transmitted_multicast_frames
Transmitted multicast Frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: transmitted_total64_octet_frames
All 64 Octet Frame Count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: transmitted_total_octet_frames_from1024_to1518
All 1024\-1518 Octet Frame Count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: transmitted_total_octet_frames_from128_to255
All 128\-255 Octet Frame Count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: transmitted_total_octet_frames_from1518_to_max
All > 1518 Octet Frame Count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: transmitted_total_octet_frames_from256_to511
All 256\-511 Octet Frame Count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: transmitted_total_octet_frames_from512_to1023
All 512\-1023 Octet Frame Count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: transmitted_total_octet_frames_from65_to127
All 65\-127 Octet Frame Count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: transmitted_total_pause_frames
All pause frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: transmitted_unicast_frames
Transmitted unicast Frames
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: uncounted_dropped_frames
Any other drops not counted
**type**\: long
**range:** 0..18446744073709551615
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.interface_name = None
self.aborted_packet_drops = None
self.buffer_underrun_packet_drops = None
self.dropped_ether_stats_fragments = None
self.dropped_ether_stats_undersize_pkts = None
self.dropped_giant_packets_greaterthan_mru = None
self.dropped_jabbers_packets_greaterthan_mru = None
self.dropped_miscellaneous_error_packets = None
self.dropped_packets_with_crc_align_errors = None
self.ether_stats_collisions = None
self.invalid_dest_mac_drop_packets = None
self.invalid_encap_drop_packets = None
self.miscellaneous_output_errors = None
self.number_of_aborted_packets_dropped = None
self.number_of_buffer_overrun_packets_dropped = None
self.number_of_miscellaneous_packets_dropped = None
self.numberof_invalid_vlan_id_packets_dropped = None
self.received8021q_frames = None
self.received_broadcast_frames = None
self.received_good_bytes = None
self.received_good_frames = None
self.received_multicast_frames = None
self.received_pause_frames = None
self.received_total64_octet_frames = None
self.received_total_bytes = None
self.received_total_frames = None
self.received_total_octet_frames_from1024_to1518 = None
self.received_total_octet_frames_from128_to255 = None
self.received_total_octet_frames_from1519_to_max = None
self.received_total_octet_frames_from256_to511 = None
self.received_total_octet_frames_from512_to1023 = None
self.received_total_octet_frames_from65_to127 = None
self.received_unicast_frames = None
self.received_unknown_opcodes = None
self.rfc2819_ether_stats_crc_align_errors = None
self.rfc2819_ether_stats_jabbers = None
self.rfc2819_ether_stats_oversized_pkts = None
self.rfc3635dot3_stats_alignment_errors = None
self.symbol_errors = None
self.total_bytes_transmitted = None
self.total_frames_transmitted = None
self.total_good_bytes_transmitted = None
self.transmitted8021q_frames = None
self.transmitted_broadcast_frames = None
self.transmitted_good_frames = None
self.transmitted_multicast_frames = None
self.transmitted_total64_octet_frames = None
self.transmitted_total_octet_frames_from1024_to1518 = None
self.transmitted_total_octet_frames_from128_to255 = None
self.transmitted_total_octet_frames_from1518_to_max = None
self.transmitted_total_octet_frames_from256_to511 = None
self.transmitted_total_octet_frames_from512_to1023 = None
self.transmitted_total_octet_frames_from65_to127 = None
self.transmitted_total_pause_frames = None
self.transmitted_unicast_frames = None
self.uncounted_dropped_frames = None
@property
def _common_path(self):
if self.interface_name is None:
raise YPYModelError('Key property interface_name is None')
return '/Cisco-IOS-XR-drivers-media-eth-oper:ethernet-interface/Cisco-IOS-XR-drivers-media-eth-oper:statistics/Cisco-IOS-XR-drivers-media-eth-oper:statistic[Cisco-IOS-XR-drivers-media-eth-oper:interface-name = ' + str(self.interface_name) + ']'
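                # Editorial note: with interface_name set to, for example,
                # 'HundredGigE0/0/0/0', the expression above yields the keyed
                # path .../statistics/statistic[...:interface-name = HundredGigE0/0/0/0],
                # which is how a single statistics entry is addressed.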
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface_name is not None:
return True
if self.aborted_packet_drops is not None:
return True
if self.buffer_underrun_packet_drops is not None:
return True
if self.dropped_ether_stats_fragments is not None:
return True
if self.dropped_ether_stats_undersize_pkts is not None:
return True
if self.dropped_giant_packets_greaterthan_mru is not None:
return True
if self.dropped_jabbers_packets_greaterthan_mru is not None:
return True
if self.dropped_miscellaneous_error_packets is not None:
return True
if self.dropped_packets_with_crc_align_errors is not None:
return True
if self.ether_stats_collisions is not None:
return True
if self.invalid_dest_mac_drop_packets is not None:
return True
if self.invalid_encap_drop_packets is not None:
return True
if self.miscellaneous_output_errors is not None:
return True
if self.number_of_aborted_packets_dropped is not None:
return True
if self.number_of_buffer_overrun_packets_dropped is not None:
return True
if self.number_of_miscellaneous_packets_dropped is not None:
return True
if self.numberof_invalid_vlan_id_packets_dropped is not None:
return True
if self.received8021q_frames is not None:
return True
if self.received_broadcast_frames is not None:
return True
if self.received_good_bytes is not None:
return True
if self.received_good_frames is not None:
return True
if self.received_multicast_frames is not None:
return True
if self.received_pause_frames is not None:
return True
if self.received_total64_octet_frames is not None:
return True
if self.received_total_bytes is not None:
return True
if self.received_total_frames is not None:
return True
if self.received_total_octet_frames_from1024_to1518 is not None:
return True
if self.received_total_octet_frames_from128_to255 is not None:
return True
if self.received_total_octet_frames_from1519_to_max is not None:
return True
if self.received_total_octet_frames_from256_to511 is not None:
return True
if self.received_total_octet_frames_from512_to1023 is not None:
return True
if self.received_total_octet_frames_from65_to127 is not None:
return True
if self.received_unicast_frames is not None:
return True
if self.received_unknown_opcodes is not None:
return True
if self.rfc2819_ether_stats_crc_align_errors is not None:
return True
if self.rfc2819_ether_stats_jabbers is not None:
return True
if self.rfc2819_ether_stats_oversized_pkts is not None:
return True
if self.rfc3635dot3_stats_alignment_errors is not None:
return True
if self.symbol_errors is not None:
return True
if self.total_bytes_transmitted is not None:
return True
if self.total_frames_transmitted is not None:
return True
if self.total_good_bytes_transmitted is not None:
return True
if self.transmitted8021q_frames is not None:
return True
if self.transmitted_broadcast_frames is not None:
return True
if self.transmitted_good_frames is not None:
return True
if self.transmitted_multicast_frames is not None:
return True
if self.transmitted_total64_octet_frames is not None:
return True
if self.transmitted_total_octet_frames_from1024_to1518 is not None:
return True
if self.transmitted_total_octet_frames_from128_to255 is not None:
return True
if self.transmitted_total_octet_frames_from1518_to_max is not None:
return True
if self.transmitted_total_octet_frames_from256_to511 is not None:
return True
if self.transmitted_total_octet_frames_from512_to1023 is not None:
return True
if self.transmitted_total_octet_frames_from65_to127 is not None:
return True
if self.transmitted_total_pause_frames is not None:
return True
if self.transmitted_unicast_frames is not None:
return True
if self.uncounted_dropped_frames is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Statistics.Statistic']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-drivers-media-eth-oper:ethernet-interface/Cisco-IOS-XR-drivers-media-eth-oper:statistics'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.statistic is not None:
for child_ref in self.statistic:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Statistics']['meta_info']
class Interfaces(object):
"""
Ethernet controller info table
.. attribute:: interface
Ethernet controller information
**type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface>`
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.interface = YList()
self.interface.parent = self
self.interface.name = 'interface'
class Interface(object):
"""
Ethernet controller information
.. attribute:: interface_name <key>
The name of the interface
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: admin_state
Port Administrative State
**type**\: :py:class:`EthernetPortEnableEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetPortEnableEnum>`
.. attribute:: layer1_info
Layer 1 information
**type**\: :py:class:`Layer1Info <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.Layer1Info>`
.. attribute:: mac_info
MAC Layer information
**type**\: :py:class:`MacInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.MacInfo>`
.. attribute:: oper_state_up
Port Operational state \- TRUE if up
**type**\: bool
.. attribute:: phy_info
PHY information
**type**\: :py:class:`PhyInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.PhyInfo>`
.. attribute:: transport_info
Transport state information
**type**\: :py:class:`TransportInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.TransportInfo>`
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.interface_name = None
self.admin_state = None
self.layer1_info = EthernetInterface.Interfaces.Interface.Layer1Info()
self.layer1_info.parent = self
self.mac_info = EthernetInterface.Interfaces.Interface.MacInfo()
self.mac_info.parent = self
self.oper_state_up = None
self.phy_info = EthernetInterface.Interfaces.Interface.PhyInfo()
self.phy_info.parent = self
self.transport_info = EthernetInterface.Interfaces.Interface.TransportInfo()
self.transport_info.parent = self
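
            # Editorial note: once populated, a single entry exposes its
            # sub-containers directly, e.g. (a sketch using the attributes
            # initialised above):
            #     iface.layer1_info.speed
            #     iface.phy_info.phy_details.vendor
            #     iface.mac_info, iface.transport_info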
class PhyInfo(object):
"""
PHY information
.. attribute:: fec_details
Forward Error Correction information
**type**\: :py:class:`FecDetails <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.PhyInfo.FecDetails>`
.. attribute:: loopback
Port operational loopback
**type**\: :py:class:`EthernetLoopbackEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetLoopbackEnum>`
.. attribute:: media_type
Port media type
**type**\: :py:class:`EthernetMediaEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetMediaEnum>`
.. attribute:: phy_details
Details about the PHY
**type**\: :py:class:`PhyDetails <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails>`
.. attribute:: phy_present
Presence of PHY
**type**\: :py:class:`EtherPhyPresentEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EtherPhyPresentEnum>`
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.fec_details = EthernetInterface.Interfaces.Interface.PhyInfo.FecDetails()
self.fec_details.parent = self
self.loopback = None
self.media_type = None
self.phy_details = EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails()
self.phy_details.parent = self
self.phy_present = None
class PhyDetails(object):
"""
Details about the PHY
.. attribute:: dig_opt_mon_alarm_thresholds
Digital Optical Monitoring alarm thresholds
**type**\: :py:class:`DigOptMonAlarmThresholds <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.DigOptMonAlarmThresholds>`
.. attribute:: dig_opt_mon_alarms
Digital Optical Monitoring alarms
**type**\: :py:class:`DigOptMonAlarms <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.DigOptMonAlarms>`
.. attribute:: lane
Digital Optical Monitoring (per lane information)
**type**\: list of :py:class:`Lane <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.Lane>`
.. attribute:: lane_field_validity
Digital Optical Monitoring (per lane information) validity
**type**\: :py:class:`LaneFieldValidity <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.LaneFieldValidity>`
.. attribute:: optics_wavelength
Wavelength of the optics being used in nm \* 1000
**type**\: int
**range:** 0..4294967295
.. attribute:: transceiver_temperature
The temperature of the transceiver (mDegrees C)
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: transceiver_voltage
The input voltage to the transceiver (mV)
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: vendor
Name of the port optics manufacturer
**type**\: str
.. attribute:: vendor_part_number
Part number for the port optics
**type**\: str
.. attribute:: vendor_serial_number
Serial number for the port optics
**type**\: str
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.dig_opt_mon_alarm_thresholds = EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.DigOptMonAlarmThresholds()
self.dig_opt_mon_alarm_thresholds.parent = self
self.dig_opt_mon_alarms = EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.DigOptMonAlarms()
self.dig_opt_mon_alarms.parent = self
self.lane = YList()
self.lane.parent = self
self.lane.name = 'lane'
self.lane_field_validity = EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.LaneFieldValidity()
self.lane_field_validity.parent = self
self.optics_wavelength = None
self.transceiver_temperature = None
self.transceiver_voltage = None
self.vendor = None
self.vendor_part_number = None
self.vendor_serial_number = None
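
                    # Editorial note: several of the fields above are scaled
                    # integers (see the class docstring); a sketch of
                    # converting them to conventional units:
                    #     wavelength_nm = self.optics_wavelength / 1000.0
                    #     temperature_c = self.transceiver_temperature / 1000.0
                    #     voltage_v     = self.transceiver_voltage / 1000.0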
class LaneFieldValidity(object):
"""
Digital Optical Monitoring (per lane
information) validity
.. attribute:: laser_bias_valid
The laser bias 'per lane' field is valid
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: receive_power_valid
The receive power 'per lane' field is valid
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: transmit_power_valid
The transmit power 'per lane' field is valid
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: wavelength_valid
The wavelength 'per lane' field is valid
**type**\: int
**range:** \-2147483648..2147483647
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.laser_bias_valid = None
self.receive_power_valid = None
self.transmit_power_valid = None
self.wavelength_valid = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:lane-field-validity'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.laser_bias_valid is not None:
return True
if self.receive_power_valid is not None:
return True
if self.transmit_power_valid is not None:
return True
if self.wavelength_valid is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.LaneFieldValidity']['meta_info']
class DigOptMonAlarmThresholds(object):
"""
Digital Optical Monitoring alarm thresholds
.. attribute:: field_validity
Field validity
**type**\: :py:class:`FieldValidity <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.DigOptMonAlarmThresholds.FieldValidity>`
.. attribute:: laser_bias_alarm_high
Laser bias high alarm threshold (mA)
**type**\: int
**range:** 0..4294967295
.. attribute:: laser_bias_alarm_low
Laser bias low alarm threshold (mA)
**type**\: int
**range:** 0..4294967295
.. attribute:: laser_bias_warning_high
Laser bias high warning threshold (mA)
**type**\: int
**range:** 0..4294967295
.. attribute:: laser_bias_warning_low
Laser bias low warning threshold (mA)
**type**\: int
**range:** 0..4294967295
.. attribute:: optical_receive_power_alarm_high
High optical receive power alarm threshold (mW)
**type**\: int
**range:** 0..4294967295
.. attribute:: optical_receive_power_alarm_low
Low optical receive power alarm threshold (mW)
**type**\: int
**range:** 0..4294967295
.. attribute:: optical_receive_power_warning_high
High optical receive power warning threshold (mW)
**type**\: int
**range:** 0..4294967295
.. attribute:: optical_receive_power_warning_low
Low optical receive power warning threshold (mW)
**type**\: int
**range:** 0..4294967295
.. attribute:: optical_transmit_power_alarm_high
High optical transmit power alarm threshold (mW)
**type**\: int
**range:** 0..4294967295
.. attribute:: optical_transmit_power_alarm_low
Low optical transmit power alarm threshold (mW)
**type**\: int
**range:** 0..4294967295
.. attribute:: optical_transmit_power_warning_high
High optical transmit power warning threshold (mW)
**type**\: int
**range:** 0..4294967295
.. attribute:: optical_transmit_power_warning_low
Low optical transmit power warning threshold (mW)
**type**\: int
**range:** 0..4294967295
.. attribute:: transceiver_temperature_alarm_high
Transceiver high temperature alarm threshold (mDegrees C)
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: transceiver_temperature_alarm_low
Transceiver low temperature alarm threshold (mDegrees C)
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: transceiver_temperature_warning_high
Transceiver high temperature warning threshold (mDegrees C)
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: transceiver_temperature_warning_low
Transceiver low temperature warning threshold (mDegrees C)
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: transceiver_voltage_alarm_high
Transceiver high voltage alarm threshold (mV)
**type**\: int
**range:** 0..4294967295
.. attribute:: transceiver_voltage_alarm_low
Transceiver low voltage alarm threshold (mV)
**type**\: int
**range:** 0..4294967295
.. attribute:: transceiver_voltage_warning_high
Transceiver high voltage warning threshold (mV)
**type**\: int
**range:** 0..4294967295
.. attribute:: transceiver_voltage_warning_low
Transceiver low voltage warning threshold (mV)
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.field_validity = EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.DigOptMonAlarmThresholds.FieldValidity()
self.field_validity.parent = self
self.laser_bias_alarm_high = None
self.laser_bias_alarm_low = None
self.laser_bias_warning_high = None
self.laser_bias_warning_low = None
self.optical_receive_power_alarm_high = None
self.optical_receive_power_alarm_low = None
self.optical_receive_power_warning_high = None
self.optical_receive_power_warning_low = None
self.optical_transmit_power_alarm_high = None
self.optical_transmit_power_alarm_low = None
self.optical_transmit_power_warning_high = None
self.optical_transmit_power_warning_low = None
self.transceiver_temperature_alarm_high = None
self.transceiver_temperature_alarm_low = None
self.transceiver_temperature_warning_high = None
self.transceiver_temperature_warning_low = None
self.transceiver_voltage_alarm_high = None
self.transceiver_voltage_alarm_low = None
self.transceiver_voltage_warning_high = None
self.transceiver_voltage_warning_low = None
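
                        # Editorial note: the threshold leaves above are
                        # reported in mA/mW (see the docstring), whereas the
                        # per-lane readings under PhyDetails.Lane are in uA and
                        # dBm*1000, so comparing a reading against its
                        # threshold requires a unit conversion first.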
class FieldValidity(object):
"""
Field validity
.. attribute:: laser_bias_valid
The laser bias fields are valid
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: receive_power_valid
The receive power fields are valid
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: temperature_valid
The temperature fields are valid
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: transmit_power_valid
The transmit power fields are valid
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: voltage_valid
The voltage fields are valid
**type**\: int
**range:** \-2147483648..2147483647
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.laser_bias_valid = None
self.receive_power_valid = None
self.temperature_valid = None
self.transmit_power_valid = None
self.voltage_valid = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:field-validity'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.laser_bias_valid is not None:
return True
if self.receive_power_valid is not None:
return True
if self.temperature_valid is not None:
return True
if self.transmit_power_valid is not None:
return True
if self.voltage_valid is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.DigOptMonAlarmThresholds.FieldValidity']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:dig-opt-mon-alarm-thresholds'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.field_validity is not None and self.field_validity._has_data():
return True
if self.laser_bias_alarm_high is not None:
return True
if self.laser_bias_alarm_low is not None:
return True
if self.laser_bias_warning_high is not None:
return True
if self.laser_bias_warning_low is not None:
return True
if self.optical_receive_power_alarm_high is not None:
return True
if self.optical_receive_power_alarm_low is not None:
return True
if self.optical_receive_power_warning_high is not None:
return True
if self.optical_receive_power_warning_low is not None:
return True
if self.optical_transmit_power_alarm_high is not None:
return True
if self.optical_transmit_power_alarm_low is not None:
return True
if self.optical_transmit_power_warning_high is not None:
return True
if self.optical_transmit_power_warning_low is not None:
return True
if self.transceiver_temperature_alarm_high is not None:
return True
if self.transceiver_temperature_alarm_low is not None:
return True
if self.transceiver_temperature_warning_high is not None:
return True
if self.transceiver_temperature_warning_low is not None:
return True
if self.transceiver_voltage_alarm_high is not None:
return True
if self.transceiver_voltage_alarm_low is not None:
return True
if self.transceiver_voltage_warning_high is not None:
return True
if self.transceiver_voltage_warning_low is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.DigOptMonAlarmThresholds']['meta_info']
class DigOptMonAlarms(object):
"""
Digital Optical Monitoring alarms
.. attribute:: laser_bias_current
Laser Bias Current Alarm
**type**\: :py:class:`EtherDomAlarmEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EtherDomAlarmEnum>`
.. attribute:: received_laser_power
Received Optical Power Alarm
**type**\: :py:class:`EtherDomAlarmEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EtherDomAlarmEnum>`
.. attribute:: transceiver_temperature
Transceiver Temperature Alarm
**type**\: :py:class:`EtherDomAlarmEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EtherDomAlarmEnum>`
.. attribute:: transceiver_voltage
Transceiver Voltage Alarm
**type**\: :py:class:`EtherDomAlarmEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EtherDomAlarmEnum>`
.. attribute:: transmit_laser_power
Transmit Laser Power Alarm
**type**\: :py:class:`EtherDomAlarmEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EtherDomAlarmEnum>`
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.laser_bias_current = None
self.received_laser_power = None
self.transceiver_temperature = None
self.transceiver_voltage = None
self.transmit_laser_power = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:dig-opt-mon-alarms'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.laser_bias_current is not None:
return True
if self.received_laser_power is not None:
return True
if self.transceiver_temperature is not None:
return True
if self.transceiver_voltage is not None:
return True
if self.transmit_laser_power is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.DigOptMonAlarms']['meta_info']
class Lane(object):
"""
Digital Optical Monitoring (per lane
information)
.. attribute:: center_wavelength
Center Wavelength (nm\*1000)
**type**\: int
**range:** 0..4294967295
.. attribute:: dig_opt_mon_alarm
Digital Optical Monitoring alarms
**type**\: :py:class:`DigOptMonAlarm <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.Lane.DigOptMonAlarm>`
.. attribute:: laser_bias_current
Laser Bias Current (uAmps)
**type**\: int
**range:** 0..4294967295
.. attribute:: received_laser_power
Received Optical Power (dBm\*1000)
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: transmit_laser_power
Transmit Laser Power (dBm\*1000)
**type**\: int
**range:** \-2147483648..2147483647
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.center_wavelength = None
self.dig_opt_mon_alarm = EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.Lane.DigOptMonAlarm()
self.dig_opt_mon_alarm.parent = self
self.laser_bias_current = None
self.received_laser_power = None
self.transmit_laser_power = None
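
                            # Editorial note: the optical power readings above
                            # are dBm scaled by 1000 (see the docstring), e.g.:
                            #     rx_dbm = lane.received_laser_power / 1000.0
                            #     tx_dbm = lane.transmit_laser_power / 1000.0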
class DigOptMonAlarm(object):
"""
Digital Optical Monitoring alarms
.. attribute:: laser_bias_current
Laser Bias Current Alarm
**type**\: :py:class:`EtherDomAlarmEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EtherDomAlarmEnum>`
.. attribute:: received_laser_power
Received Optical Power Alarm
**type**\: :py:class:`EtherDomAlarmEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EtherDomAlarmEnum>`
.. attribute:: transmit_laser_power
Transmit Laser Power Alarm
**type**\: :py:class:`EtherDomAlarmEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EtherDomAlarmEnum>`
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.laser_bias_current = None
self.received_laser_power = None
self.transmit_laser_power = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:dig-opt-mon-alarm'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.laser_bias_current is not None:
return True
if self.received_laser_power is not None:
return True
if self.transmit_laser_power is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.Lane.DigOptMonAlarm']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:lane'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.center_wavelength is not None:
return True
if self.dig_opt_mon_alarm is not None and self.dig_opt_mon_alarm._has_data():
return True
if self.laser_bias_current is not None:
return True
if self.received_laser_power is not None:
return True
if self.transmit_laser_power is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails.Lane']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:phy-details'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.dig_opt_mon_alarm_thresholds is not None and self.dig_opt_mon_alarm_thresholds._has_data():
return True
if self.dig_opt_mon_alarms is not None and self.dig_opt_mon_alarms._has_data():
return True
if self.lane is not None:
for child_ref in self.lane:
if child_ref._has_data():
return True
if self.lane_field_validity is not None and self.lane_field_validity._has_data():
return True
if self.optics_wavelength is not None:
return True
if self.transceiver_temperature is not None:
return True
if self.transceiver_voltage is not None:
return True
if self.vendor is not None:
return True
if self.vendor_part_number is not None:
return True
if self.vendor_serial_number is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.PhyInfo.PhyDetails']['meta_info']
class FecDetails(object):
"""
Forward Error Correction information
.. attribute:: corrected_codeword_count
Corrected codeword error count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: fec
Port operational FEC type
**type**\: :py:class:`EthernetFecEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetFecEnum>`
.. attribute:: uncorrected_codeword_count
Uncorrected codeword error count
**type**\: long
**range:** 0..18446744073709551615
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.corrected_codeword_count = None
self.fec = None
self.uncorrected_codeword_count = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:fec-details'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.corrected_codeword_count is not None:
return True
if self.fec is not None:
return True
if self.uncorrected_codeword_count is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.PhyInfo.FecDetails']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:phy-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.fec_details is not None and self.fec_details._has_data():
return True
if self.loopback is not None:
return True
if self.media_type is not None:
return True
if self.phy_details is not None and self.phy_details._has_data():
return True
if self.phy_present is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.PhyInfo']['meta_info']
class Layer1Info(object):
"""
Layer 1 information
.. attribute:: autoneg
Port autonegotiation configuration settings
**type**\: :py:class:`Autoneg <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.Layer1Info.Autoneg>`
.. attribute:: bandwidth_utilization
Bandwidth utilization (hundredths of a percent)
**type**\: int
**range:** 0..4294967295
.. attribute:: ber_monitoring
BER monitoring details
**type**\: :py:class:`BerMonitoring <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.Layer1Info.BerMonitoring>`
.. attribute:: current_alarms
Current alarms
**type**\: :py:class:`CurrentAlarms <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.Layer1Info.CurrentAlarms>`
.. attribute:: duplex
Port operational duplexity
**type**\: :py:class:`EthernetDuplexEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetDuplexEnum>`
.. attribute:: error_counts
Statistics for detected errors
**type**\: :py:class:`ErrorCounts <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.Layer1Info.ErrorCounts>`
.. attribute:: flowcontrol
Port operational flow control
**type**\: :py:class:`EtherFlowcontrolEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EtherFlowcontrolEnum>`
.. attribute:: ipg
Port operational inter\-packet\-gap
**type**\: :py:class:`EthernetIpgEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetIpgEnum>`
.. attribute:: laser_squelch_enabled
Laser Squelch \- TRUE if enabled
**type**\: bool
.. attribute:: led_state
State of the LED
**type**\: :py:class:`EtherLedStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EtherLedStateEnum>`
.. attribute:: link_state
Link state
**type**\: :py:class:`EtherLinkStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EtherLinkStateEnum>`
.. attribute:: previous_alarms
Previous alarms
**type**\: :py:class:`PreviousAlarms <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.Layer1Info.PreviousAlarms>`
.. attribute:: speed
Port operational speed
**type**\: :py:class:`EthernetSpeedEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetSpeedEnum>`
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.autoneg = EthernetInterface.Interfaces.Interface.Layer1Info.Autoneg()
self.autoneg.parent = self
self.bandwidth_utilization = None
self.ber_monitoring = EthernetInterface.Interfaces.Interface.Layer1Info.BerMonitoring()
self.ber_monitoring.parent = self
self.current_alarms = EthernetInterface.Interfaces.Interface.Layer1Info.CurrentAlarms()
self.current_alarms.parent = self
self.duplex = None
self.error_counts = EthernetInterface.Interfaces.Interface.Layer1Info.ErrorCounts()
self.error_counts.parent = self
self.flowcontrol = None
self.ipg = None
self.laser_squelch_enabled = None
self.led_state = None
self.link_state = None
self.previous_alarms = EthernetInterface.Interfaces.Interface.Layer1Info.PreviousAlarms()
self.previous_alarms.parent = self
self.speed = None
class Autoneg(object):
"""
Port autonegotiation configuration settings
.. attribute:: autoneg_enabled
TRUE if autonegotiation is enabled
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: config_override
If true, configuration overrides negotiated settings. If false, negotiated settings in effect
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: duplex
Restricted duplex (if relevant bit is set in mask)
**type**\: :py:class:`EthernetDuplexEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetDuplexEnum>`
.. attribute:: flowcontrol
Restricted flowcontrol (if relevant bit is set in mask)
**type**\: :py:class:`EtherFlowcontrolEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EtherFlowcontrolEnum>`
.. attribute:: mask
Validity mask\: 0x1 speed, 0x2 duplex, 0x4 flowcontrol
**type**\: int
**range:** 0..4294967295
.. attribute:: speed
Restricted speed (if relevant bit is set in mask)
**type**\: :py:class:`EthernetSpeedEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetSpeedEnum>`
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.autoneg_enabled = None
self.config_override = None
self.duplex = None
self.flowcontrol = None
self.mask = None
self.speed = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:autoneg'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.autoneg_enabled is not None:
return True
if self.config_override is not None:
return True
if self.duplex is not None:
return True
if self.flowcontrol is not None:
return True
if self.mask is not None:
return True
if self.speed is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.Layer1Info.Autoneg']['meta_info']
class CurrentAlarms(object):
"""
Current alarms
.. attribute:: hi_ber_alarm
Hi BER
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: local_fault_alarm
Local Fault
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: loss_of_synchronization_data_alarm
Loss of Synchronization Data
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: pcs_loss_of_block_lock_alarm
PCS Loss of Block Lock
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: received_loss_of_signal_alarm
Received Loss of Signal
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: remote_fault_alarm
Remote Fault
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: sd_ber_alarm
SD BER
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: sf_ber_alarm
SF BER
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: squelch_alarm
Squelch
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.hi_ber_alarm = None
self.local_fault_alarm = None
self.loss_of_synchronization_data_alarm = None
self.pcs_loss_of_block_lock_alarm = None
self.received_loss_of_signal_alarm = None
self.remote_fault_alarm = None
self.sd_ber_alarm = None
self.sf_ber_alarm = None
self.squelch_alarm = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:current-alarms'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.hi_ber_alarm is not None:
return True
if self.local_fault_alarm is not None:
return True
if self.loss_of_synchronization_data_alarm is not None:
return True
if self.pcs_loss_of_block_lock_alarm is not None:
return True
if self.received_loss_of_signal_alarm is not None:
return True
if self.remote_fault_alarm is not None:
return True
if self.sd_ber_alarm is not None:
return True
if self.sf_ber_alarm is not None:
return True
if self.squelch_alarm is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.Layer1Info.CurrentAlarms']['meta_info']
class PreviousAlarms(object):
"""
Previous alarms
.. attribute:: hi_ber_alarm
Hi BER
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: local_fault_alarm
Local Fault
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: loss_of_synchronization_data_alarm
Loss of Synchronization Data
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: pcs_loss_of_block_lock_alarm
PCS Loss of Block Lock
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: received_loss_of_signal_alarm
Received Loss of Signal
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: remote_fault_alarm
Remote Fault
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: sd_ber_alarm
SD BER
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: sf_ber_alarm
SF BER
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
.. attribute:: squelch_alarm
Squelch
**type**\: :py:class:`EthCtrlrAlarmStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthCtrlrAlarmStateEnum>`
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.hi_ber_alarm = None
self.local_fault_alarm = None
self.loss_of_synchronization_data_alarm = None
self.pcs_loss_of_block_lock_alarm = None
self.received_loss_of_signal_alarm = None
self.remote_fault_alarm = None
self.sd_ber_alarm = None
self.sf_ber_alarm = None
self.squelch_alarm = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:previous-alarms'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.hi_ber_alarm is not None:
return True
if self.local_fault_alarm is not None:
return True
if self.loss_of_synchronization_data_alarm is not None:
return True
if self.pcs_loss_of_block_lock_alarm is not None:
return True
if self.received_loss_of_signal_alarm is not None:
return True
if self.remote_fault_alarm is not None:
return True
if self.sd_ber_alarm is not None:
return True
if self.sf_ber_alarm is not None:
return True
if self.squelch_alarm is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.Layer1Info.PreviousAlarms']['meta_info']
class ErrorCounts(object):
"""
Statistics for detected errors
.. attribute:: pcsbip_errors
PCS BIP error count
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: sync_header_errors
Sync\-header error count
**type**\: long
**range:** 0..18446744073709551615
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.pcsbip_errors = None
self.sync_header_errors = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:error-counts'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.pcsbip_errors is not None:
return True
if self.sync_header_errors is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.Layer1Info.ErrorCounts']['meta_info']
class BerMonitoring(object):
"""
BER monitoring details
.. attribute:: settings
The BER monitoring settings to be applied
**type**\: :py:class:`Settings <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.Layer1Info.BerMonitoring.Settings>`
.. attribute:: supported
Whether or not BER monitoring is supported
**type**\: int
**range:** \-2147483648..2147483647
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.settings = EthernetInterface.Interfaces.Interface.Layer1Info.BerMonitoring.Settings()
self.settings.parent = self
self.supported = None
class Settings(object):
"""
The BER monitoring settings to be applied
.. attribute:: signal_degrade_alarm
Report alarm to indicate signal degrade
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: signal_degrade_threshold
BER threshold for signal to degrade
**type**\: int
**range:** 0..4294967295
.. attribute:: signal_fail_alarm
Report alarm to indicate signal failure
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: signal_fail_threshold
BER threshold for signal to fail
**type**\: int
**range:** 0..4294967295
.. attribute:: signal_remote_fault
Whether drivers should signal remote faults
**type**\: int
**range:** \-2147483648..2147483647
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.signal_degrade_alarm = None
self.signal_degrade_threshold = None
self.signal_fail_alarm = None
self.signal_fail_threshold = None
self.signal_remote_fault = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:settings'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.signal_degrade_alarm is not None:
return True
if self.signal_degrade_threshold is not None:
return True
if self.signal_fail_alarm is not None:
return True
if self.signal_fail_threshold is not None:
return True
if self.signal_remote_fault is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.Layer1Info.BerMonitoring.Settings']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:ber-monitoring'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.settings is not None and self.settings._has_data():
return True
if self.supported is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.Layer1Info.BerMonitoring']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:layer1-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.autoneg is not None and self.autoneg._has_data():
return True
if self.bandwidth_utilization is not None:
return True
if self.ber_monitoring is not None and self.ber_monitoring._has_data():
return True
if self.current_alarms is not None and self.current_alarms._has_data():
return True
if self.duplex is not None:
return True
if self.error_counts is not None and self.error_counts._has_data():
return True
if self.flowcontrol is not None:
return True
if self.ipg is not None:
return True
if self.laser_squelch_enabled is not None:
return True
if self.led_state is not None:
return True
if self.link_state is not None:
return True
if self.previous_alarms is not None and self.previous_alarms._has_data():
return True
if self.speed is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.Layer1Info']['meta_info']
class MacInfo(object):
"""
MAC Layer information
.. attribute:: burned_in_mac_address
Port Burned\-In MAC address
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
.. attribute:: mru
Port operational MRU
**type**\: int
**range:** 0..4294967295
.. attribute:: mtu
Port operational MTU
**type**\: int
**range:** 0..4294967295
.. attribute:: multicast_mac_filters
Port multicast MAC filter information
**type**\: :py:class:`MulticastMacFilters <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.MacInfo.MulticastMacFilters>`
.. attribute:: operational_mac_address
Port operational MAC address
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
.. attribute:: unicast_mac_filters
Port unicast MAC filter information
**type**\: :py:class:`UnicastMacFilters <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.MacInfo.UnicastMacFilters>`
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.burned_in_mac_address = None
self.mru = None
self.mtu = None
self.multicast_mac_filters = EthernetInterface.Interfaces.Interface.MacInfo.MulticastMacFilters()
self.multicast_mac_filters.parent = self
self.operational_mac_address = None
self.unicast_mac_filters = EthernetInterface.Interfaces.Interface.MacInfo.UnicastMacFilters()
self.unicast_mac_filters.parent = self
class UnicastMacFilters(object):
"""
Port unicast MAC filter information
.. attribute:: unicast_mac_address
MAC addresses in the unicast ingress destination MAC filter
**type**\: list of str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.unicast_mac_address = YLeafList()
self.unicast_mac_address.parent = self
self.unicast_mac_address.name = 'unicast_mac_address'
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:unicast-mac-filters'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.unicast_mac_address is not None:
for child in self.unicast_mac_address:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.MacInfo.UnicastMacFilters']['meta_info']
class MulticastMacFilters(object):
"""
Port multicast MAC filter information
.. attribute:: multicast_mac_address
MAC addresses in the multicast ingress destination MAC filter
**type**\: list of :py:class:`MulticastMacAddress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Interfaces.Interface.MacInfo.MulticastMacFilters.MulticastMacAddress>`
.. attribute:: multicast_promiscuous
Whether the port is in multicast promiscuous mode
**type**\: bool
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.multicast_mac_address = YList()
self.multicast_mac_address.parent = self
self.multicast_mac_address.name = 'multicast_mac_address'
self.multicast_promiscuous = None
class MulticastMacAddress(object):
"""
MAC addresses in the multicast ingress
destination MAC filter
.. attribute:: mac_address
MAC address
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
.. attribute:: mask
Mask for this MAC address
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.mac_address = None
self.mask = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:multicast-mac-address'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.mac_address is not None:
return True
if self.mask is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.MacInfo.MulticastMacFilters.MulticastMacAddress']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:multicast-mac-filters'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.multicast_mac_address is not None:
for child_ref in self.multicast_mac_address:
if child_ref._has_data():
return True
if self.multicast_promiscuous is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.MacInfo.MulticastMacFilters']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:mac-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.burned_in_mac_address is not None:
return True
if self.mru is not None:
return True
if self.mtu is not None:
return True
if self.multicast_mac_filters is not None and self.multicast_mac_filters._has_data():
return True
if self.operational_mac_address is not None:
return True
if self.unicast_mac_filters is not None and self.unicast_mac_filters._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.MacInfo']['meta_info']
class TransportInfo(object):
"""
Transport state information
.. attribute:: ains_status
AINS Soak status
**type**\: :py:class:`EtherAinsStatusEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EtherAinsStatusEnum>`
.. attribute:: maintenance_mode_enabled
Maintenance Mode \- TRUE if enabled
**type**\: bool
.. attribute:: remaining_duration
Remaining duration (seconds) of AINS soak timer
**type**\: int
**range:** 0..4294967295
.. attribute:: total_duration
Total duration (seconds) of AINS soak timer
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.ains_status = None
self.maintenance_mode_enabled = None
self.remaining_duration = None
self.total_duration = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:transport-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.ains_status is not None:
return True
if self.maintenance_mode_enabled is not None:
return True
if self.remaining_duration is not None:
return True
if self.total_duration is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface.TransportInfo']['meta_info']
@property
def _common_path(self):
if self.interface_name is None:
raise YPYModelError('Key property interface_name is None')
return '/Cisco-IOS-XR-drivers-media-eth-oper:ethernet-interface/Cisco-IOS-XR-drivers-media-eth-oper:interfaces/Cisco-IOS-XR-drivers-media-eth-oper:interface[Cisco-IOS-XR-drivers-media-eth-oper:interface-name = ' + str(self.interface_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface_name is not None:
return True
if self.admin_state is not None:
return True
if self.layer1_info is not None and self.layer1_info._has_data():
return True
if self.mac_info is not None and self.mac_info._has_data():
return True
if self.oper_state_up is not None:
return True
if self.phy_info is not None and self.phy_info._has_data():
return True
if self.transport_info is not None and self.transport_info._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces.Interface']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-drivers-media-eth-oper:ethernet-interface/Cisco-IOS-XR-drivers-media-eth-oper:interfaces'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface is not None:
for child_ref in self.interface:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Interfaces']['meta_info']
class Berts(object):
"""
Ethernet controller BERT table
.. attribute:: bert
Ethernet BERT information
**type**\: list of :py:class:`Bert <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Berts.Bert>`
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.bert = YList()
self.bert.parent = self
self.bert.name = 'bert'
class Bert(object):
"""
Ethernet BERT information
.. attribute:: interface_name <key>
The name of the interface
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: bert_status
Current test status
**type**\: :py:class:`BertStatus <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetInterface.Berts.Bert.BertStatus>`
.. attribute:: port_bert_interval
Port BERT interval
**type**\: int
**range:** 0..4294967295
.. attribute:: time_left
Remaining time for this test in seconds
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.interface_name = None
self.bert_status = EthernetInterface.Berts.Bert.BertStatus()
self.bert_status.parent = self
self.port_bert_interval = None
self.time_left = None
class BertStatus(object):
"""
Current test status
.. attribute:: bert_state_enabled
State
**type**\: bool
.. attribute:: data_availability
Flag indicating available data
**type**\: int
**range:** 0..4294967295
.. attribute:: device_under_test
Device being tested
**type**\: :py:class:`EthernetDevEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetDevEnum>`
.. attribute:: error_type
Bit, block or frame error
**type**\: :py:class:`EthernetBertErrCntEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetBertErrCntEnum>`
.. attribute:: interface_device
Interface being tested
**type**\: :py:class:`EthernetDevIfEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetDevIfEnum>`
.. attribute:: receive_count
Receive count (if 0x1 set in flag)
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: receive_errors
Received errors (if 0x4 set in flag)
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: test_pattern
Test pattern
**type**\: :py:class:`EthernetBertPatternEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_drivers_media_eth_oper.EthernetBertPatternEnum>`
.. attribute:: transmit_count
Transmit count (if 0x2 set in flag)
**type**\: long
**range:** 0..18446744073709551615
"""
_prefix = 'drivers-media-eth-oper'
_revision = '2015-10-14'
def __init__(self):
self.parent = None
self.bert_state_enabled = None
self.data_availability = None
self.device_under_test = None
self.error_type = None
self.interface_device = None
self.receive_count = None
self.receive_errors = None
self.test_pattern = None
self.transmit_count = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-drivers-media-eth-oper:bert-status'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.bert_state_enabled is not None:
return True
if self.data_availability is not None:
return True
if self.device_under_test is not None:
return True
if self.error_type is not None:
return True
if self.interface_device is not None:
return True
if self.receive_count is not None:
return True
if self.receive_errors is not None:
return True
if self.test_pattern is not None:
return True
if self.transmit_count is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Berts.Bert.BertStatus']['meta_info']
@property
def _common_path(self):
if self.interface_name is None:
raise YPYModelError('Key property interface_name is None')
return '/Cisco-IOS-XR-drivers-media-eth-oper:ethernet-interface/Cisco-IOS-XR-drivers-media-eth-oper:berts/Cisco-IOS-XR-drivers-media-eth-oper:bert[Cisco-IOS-XR-drivers-media-eth-oper:interface-name = ' + str(self.interface_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface_name is not None:
return True
if self.bert_status is not None and self.bert_status._has_data():
return True
if self.port_bert_interval is not None:
return True
if self.time_left is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Berts.Bert']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-drivers-media-eth-oper:ethernet-interface/Cisco-IOS-XR-drivers-media-eth-oper:berts'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.bert is not None:
for child_ref in self.bert:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface.Berts']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-drivers-media-eth-oper:ethernet-interface'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.berts is not None and self.berts._has_data():
return True
if self.interfaces is not None and self.interfaces._has_data():
return True
if self.statistics is not None and self.statistics._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_drivers_media_eth_oper as meta
return meta._meta_table['EthernetInterface']['meta_info']
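# --- Usage sketch (not part of the generated model; device address and credentials are assumptions) ---
# These generated classes are normally populated by a YDK service rather than built by hand.
# A minimal read of the operational data could look roughly like this, using the classic
# CRUDService / NetconfServiceProvider API:
#
#     from ydk.services import CRUDService
#     from ydk.providers import NetconfServiceProvider
#     provider = NetconfServiceProvider(address='192.0.2.1', port=830,
#                                       username='admin', password='admin', protocol='ssh')
#     crud = CRUDService()
#     eth = EthernetInterface()
#     eth = crud.read(provider, eth)            # fills interfaces, berts, statistics
#     for intf in eth.interfaces.interface:
#         print(intf.interface_name, intf.layer1_info.link_state)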
| apache-2.0 | 5,103,785,533,784,643,000 | 29.011349 | 325 | 0.500112 | false |
gurneyalex/odoo | addons/stock/wizard/stock_picking_return.py | 4 | 10775 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools.float_utils import float_round
class ReturnPickingLine(models.TransientModel):
_name = "stock.return.picking.line"
_rec_name = 'product_id'
_description = 'Return Picking Line'
product_id = fields.Many2one('product.product', string="Product", required=True, domain="[('id', '=', product_id)]")
quantity = fields.Float("Quantity", digits='Product Unit of Measure', required=True)
uom_id = fields.Many2one('uom.uom', string='Unit of Measure', related='move_id.product_uom', readonly=False)
wizard_id = fields.Many2one('stock.return.picking', string="Wizard")
move_id = fields.Many2one('stock.move', "Move")
class ReturnPicking(models.TransientModel):
_name = 'stock.return.picking'
_description = 'Return Picking'
@api.model
def default_get(self, fields):
if len(self.env.context.get('active_ids', list())) > 1:
raise UserError(_("You may only return one picking at a time."))
res = super(ReturnPicking, self).default_get(fields)
if self.env.context.get('active_id') and self.env.context.get('active_model') == 'stock.picking':
picking = self.env['stock.picking'].browse(self.env.context.get('active_id'))
if picking.exists():
res.update({'picking_id': picking.id})
return res
picking_id = fields.Many2one('stock.picking')
product_return_moves = fields.One2many('stock.return.picking.line', 'wizard_id', 'Moves')
move_dest_exists = fields.Boolean('Chained Move Exists', readonly=True)
original_location_id = fields.Many2one('stock.location')
parent_location_id = fields.Many2one('stock.location')
company_id = fields.Many2one(related='picking_id.company_id')
location_id = fields.Many2one(
'stock.location', 'Return Location',
domain="['|', ('id', '=', original_location_id), '|', '&', ('return_location', '=', True), ('company_id', '=', False), '&', ('return_location', '=', True), ('company_id', '=', company_id)]")
@api.onchange('picking_id')
def _onchange_picking_id(self):
move_dest_exists = False
product_return_moves = [(5,)]
if self.picking_id and self.picking_id.state != 'done':
raise UserError(_("You may only return Done pickings."))
# In case we want to set specific default values (e.g. 'to_refund'), we must fetch the
# default values for creation.
line_fields = [f for f in self.env['stock.return.picking.line']._fields.keys()]
product_return_moves_data_tmpl = self.env['stock.return.picking.line'].default_get(line_fields)
for move in self.picking_id.move_lines:
if move.state == 'cancel':
continue
if move.scrapped:
continue
if move.move_dest_ids:
move_dest_exists = True
product_return_moves_data = dict(product_return_moves_data_tmpl)
product_return_moves_data.update(self._prepare_stock_return_picking_line_vals_from_move(move))
product_return_moves.append((0, 0, product_return_moves_data))
if self.picking_id and not product_return_moves:
raise UserError(_("No products to return (only lines in Done state and not fully returned yet can be returned)."))
if self.picking_id:
self.product_return_moves = product_return_moves
self.move_dest_exists = move_dest_exists
self.parent_location_id = self.picking_id.picking_type_id.warehouse_id and self.picking_id.picking_type_id.warehouse_id.view_location_id.id or self.picking_id.location_id.location_id.id
self.original_location_id = self.picking_id.location_id.id
location_id = self.picking_id.location_id.id
if self.picking_id.picking_type_id.return_picking_type_id.default_location_dest_id.return_location:
location_id = self.picking_id.picking_type_id.return_picking_type_id.default_location_dest_id.id
self.location_id = location_id
@api.model
def _prepare_stock_return_picking_line_vals_from_move(self, stock_move):
quantity = stock_move.product_qty
for move in stock_move.move_dest_ids:
if move.origin_returned_move_id and move.origin_returned_move_id != stock_move:
continue
if move.state in ('partially_available', 'assigned'):
quantity -= sum(move.move_line_ids.mapped('product_qty'))
            elif move.state == 'done':
quantity -= move.product_qty
quantity = float_round(quantity, precision_rounding=stock_move.product_uom.rounding)
return {
'product_id': stock_move.product_id.id,
'quantity': quantity,
'move_id': stock_move.id,
'uom_id': stock_move.product_id.uom_id.id,
}
def _prepare_move_default_values(self, return_line, new_picking):
vals = {
'product_id': return_line.product_id.id,
'product_uom_qty': return_line.quantity,
'product_uom': return_line.product_id.uom_id.id,
'picking_id': new_picking.id,
'state': 'draft',
'date_expected': fields.Datetime.now(),
'location_id': return_line.move_id.location_dest_id.id,
'location_dest_id': self.location_id.id or return_line.move_id.location_id.id,
'picking_type_id': new_picking.picking_type_id.id,
'warehouse_id': self.picking_id.picking_type_id.warehouse_id.id,
'origin_returned_move_id': return_line.move_id.id,
'procure_method': 'make_to_stock',
}
return vals
def _create_returns(self):
# TODO sle: the unreserve of the next moves could be less brutal
for return_move in self.product_return_moves.mapped('move_id'):
return_move.move_dest_ids.filtered(lambda m: m.state not in ('done', 'cancel'))._do_unreserve()
# create new picking for returned products
picking_type_id = self.picking_id.picking_type_id.return_picking_type_id.id or self.picking_id.picking_type_id.id
new_picking = self.picking_id.copy({
'move_lines': [],
'picking_type_id': picking_type_id,
'state': 'draft',
'origin': _("Return of %s") % self.picking_id.name,
'location_id': self.picking_id.location_dest_id.id,
'location_dest_id': self.location_id.id})
new_picking.message_post_with_view('mail.message_origin_link',
values={'self': new_picking, 'origin': self.picking_id},
subtype_id=self.env.ref('mail.mt_note').id)
returned_lines = 0
for return_line in self.product_return_moves:
if not return_line.move_id:
raise UserError(_("You have manually created product lines, please delete them to proceed."))
# TODO sle: float_is_zero?
if return_line.quantity:
returned_lines += 1
vals = self._prepare_move_default_values(return_line, new_picking)
r = return_line.move_id.copy(vals)
vals = {}
# +--------------------------------------------------------------------------------------------------------+
# | picking_pick <--Move Orig-- picking_pack --Move Dest--> picking_ship
# | | returned_move_ids ↑ | returned_move_ids
# | ↓ | return_line.move_id ↓
# | return pick(Add as dest) return toLink return ship(Add as orig)
# +--------------------------------------------------------------------------------------------------------+
move_orig_to_link = return_line.move_id.move_dest_ids.mapped('returned_move_ids')
# link to original move
move_orig_to_link |= return_line.move_id
# link to siblings of original move, if any
move_orig_to_link |= return_line.move_id\
.mapped('move_dest_ids').filtered(lambda m: m.state not in ('cancel'))\
.mapped('move_orig_ids').filtered(lambda m: m.state not in ('cancel'))
move_dest_to_link = return_line.move_id.move_orig_ids.mapped('returned_move_ids')
# link to children of originally returned moves, if any. Note that the use of
# 'return_line.move_id.move_orig_ids.returned_move_ids.move_orig_ids.move_dest_ids'
# instead of 'return_line.move_id.move_orig_ids.move_dest_ids' prevents linking a
# return directly to the destination moves of its parents. However, the return of
# the return will be linked to the destination moves.
move_dest_to_link |= return_line.move_id.move_orig_ids.mapped('returned_move_ids')\
.mapped('move_orig_ids').filtered(lambda m: m.state not in ('cancel'))\
.mapped('move_dest_ids').filtered(lambda m: m.state not in ('cancel'))
vals['move_orig_ids'] = [(4, m.id) for m in move_orig_to_link]
vals['move_dest_ids'] = [(4, m.id) for m in move_dest_to_link]
r.write(vals)
if not returned_lines:
raise UserError(_("Please specify at least one non-zero quantity."))
new_picking.action_confirm()
new_picking.action_assign()
return new_picking.id, picking_type_id
def create_returns(self):
for wizard in self:
new_picking_id, pick_type_id = wizard._create_returns()
# Override the context to disable all the potential filters that could have been set previously
ctx = dict(self.env.context)
ctx.update({
'default_partner_id': self.picking_id.partner_id.id,
'search_default_picking_type_id': pick_type_id,
'search_default_draft': False,
'search_default_assigned': False,
'search_default_confirmed': False,
'search_default_ready': False,
'search_default_late': False,
'search_default_available': False,
})
return {
'name': _('Returned Picking'),
'view_mode': 'form,tree,calendar',
'res_model': 'stock.picking',
'res_id': new_picking_id,
'type': 'ir.actions.act_window',
'context': ctx,
}
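# --- Usage sketch (assumption; not part of the module) ---
# The wizard is normally launched from the "Return" button on a done picking. From code,
# e.g. in a test, an equivalent flow could look roughly like this, assuming `picking` is a
# stock.picking record in the done state:
#
#     wizard = self.env['stock.return.picking'].with_context(
#         active_id=picking.id, active_model='stock.picking').create({})
#     wizard._onchange_picking_id()        # fills product_return_moves from the picking
#     action = wizard.create_returns()     # creates, confirms and assigns the return picking
#     return_picking = self.env['stock.picking'].browse(action['res_id'])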
| agpl-3.0 | 6,509,121,793,401,225,000 | 54.225641 | 198 | 0.58297 | false |
cliixtech/bigorna | tests/tasks/test_task_factory.py | 1 | 1725 | from unittest import TestCase
from unittest.mock import create_autospec
from nose.tools import istest, raises
from bigorna.commons.config import Config
from bigorna.tasks import TaskFactory, TaskDefinition
class TaskFactoryTest(TestCase):
def setUp(self):
TestCase.setUp(self)
self.out = "%s.out"
self.config_mock = create_autospec(Config)
self.config_mock.tasks = [{'name': 'ls', 'cmd': 'ls -la {dirname}'},
{'name': 'cp', 'cmd': 'cp {orig} {dest}'}]
self.factory = TaskFactory(self.config_mock)
@istest
def create_return_task_def(self):
t_def = self.factory.create_task_definition('ls', {'dirname': '/home'}, self.out)
self.assertEquals(t_def.cmd, 'ls -la /home')
self.assertIsNotNone(t_def.output_file)
@raises(KeyError)
@istest
def create_raise_error_if_not_enough_params(self):
self.factory.create_task_definition('cp', {'orig': '/home'}, self.out)
@raises(KeyError)
@istest
def create_raise_error_if_invalid_command(self):
self.factory.create_task_definition('cd', {'dirname': '/home'}, self.out)
class TaskDefinitionTest(TestCase):
@istest
def create_task_def_generates_id(self):
t_def = TaskDefinition("ls /", None)
self.assertIsNotNone(t_def.id)
self.assertIsInstance(t_def.id, str)
@istest
def create_task_def_sets_cwd(self):
t_def = TaskDefinition("ls /", '/home', "%s.out")
self.assertEquals(t_def.base_dir, '/home')
@istest
def create_task_output_from_id(self):
t_def = TaskDefinition("ls /", None, "%s.out")
self.assertEquals(t_def.output_file, "%s.out" % t_def.id)
| gpl-3.0 | 6,829,806,794,336,506,000 | 30.363636 | 89 | 0.627246 | false |
hrishioa/Aviato | flask/Scripts/gdal_polygonize.py | 1 | 6499 | #!C:\Users\SeanSaito\Dev\aviato\flask\Scripts\python.exe
# -*- coding: utf-8 -*-
#******************************************************************************
# $Id$
#
# Project: GDAL Python Interface
# Purpose: Application for converting raster data to a vector polygon layer.
# Author: Frank Warmerdam, [email protected]
#
#******************************************************************************
# Copyright (c) 2008, Frank Warmerdam
# Copyright (c) 2009-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#******************************************************************************
try:
from osgeo import gdal, ogr, osr
except ImportError:
import gdal, ogr, osr
import sys
import os.path
def Usage():
print("""
gdal_polygonize [-8] [-nomask] [-mask filename] raster_file [-b band]
[-q] [-f ogr_format] out_file [layer] [fieldname]
""")
sys.exit(1)
# =============================================================================
# Mainline
# =============================================================================
format = 'GML'
options = []
quiet_flag = 0
src_filename = None
src_band_n = 1
dst_filename = None
dst_layername = None
dst_fieldname = None
dst_field = -1
mask = 'default'
gdal.AllRegister()
argv = gdal.GeneralCmdLineProcessor( sys.argv )
if argv is None:
sys.exit( 0 )
# Parse command line arguments.
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-f':
i = i + 1
format = argv[i]
elif arg == '-q' or arg == '-quiet':
quiet_flag = 1
elif arg == '-8':
options.append('8CONNECTED=8')
elif arg == '-nomask':
mask = 'none'
elif arg == '-mask':
i = i + 1
mask = argv[i]
elif arg == '-b':
i = i + 1
src_band_n = int(argv[i])
elif src_filename is None:
src_filename = argv[i]
elif dst_filename is None:
dst_filename = argv[i]
elif dst_layername is None:
dst_layername = argv[i]
elif dst_fieldname is None:
dst_fieldname = argv[i]
else:
Usage()
i = i + 1
if src_filename is None or dst_filename is None:
Usage()
if dst_layername is None:
dst_layername = 'out'
# =============================================================================
# Verify we have next gen bindings with the polygonize method.
# =============================================================================
try:
gdal.Polygonize
except:
print('')
print('gdal.Polygonize() not available. You are likely using "old gen"')
print('bindings or an older version of the next gen bindings.')
print('')
sys.exit(1)
# =============================================================================
# Open source file
# =============================================================================
src_ds = gdal.Open( src_filename )
if src_ds is None:
print('Unable to open %s' % src_filename)
sys.exit(1)
srcband = src_ds.GetRasterBand(src_band_n)
if mask == 'default':
maskband = srcband.GetMaskBand()
elif mask == 'none':
maskband = None
else:
mask_ds = gdal.Open( mask )
maskband = mask_ds.GetRasterBand(1)
# =============================================================================
# Try opening the destination file as an existing file.
# =============================================================================
try:
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
dst_ds = ogr.Open( dst_filename, update=1 )
gdal.PopErrorHandler()
except:
dst_ds = None
# =============================================================================
# Create output file.
# =============================================================================
if dst_ds is None:
drv = ogr.GetDriverByName(format)
if not quiet_flag:
print('Creating output %s of format %s.' % (dst_filename, format))
dst_ds = drv.CreateDataSource( dst_filename )
# =============================================================================
# Find or create destination layer.
# =============================================================================
try:
dst_layer = dst_ds.GetLayerByName(dst_layername)
except:
dst_layer = None
if dst_layer is None:
srs = None
if src_ds.GetProjectionRef() != '':
srs = osr.SpatialReference()
srs.ImportFromWkt( src_ds.GetProjectionRef() )
dst_layer = dst_ds.CreateLayer(dst_layername, srs = srs )
if dst_fieldname is None:
dst_fieldname = 'DN'
fd = ogr.FieldDefn( dst_fieldname, ogr.OFTInteger )
dst_layer.CreateField( fd )
dst_field = 0
else:
if dst_fieldname is not None:
dst_field = dst_layer.GetLayerDefn().GetFieldIndex(dst_fieldname)
if dst_field < 0:
print("Warning: cannot find field '%s' in layer '%s'" % (dst_fieldname, dst_layername))
# =============================================================================
# Invoke algorithm.
# =============================================================================
if quiet_flag:
prog_func = None
else:
prog_func = gdal.TermProgress
result = gdal.Polygonize( srcband, maskband, dst_layer, dst_field, options,
callback = prog_func )
srcband = None
src_ds = None
dst_ds = None
mask_ds = None
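# --- Example invocations (illustrative only; file names are made up) ---
# Following the Usage() text above, polygonize band 1 of a raster into an ESRI Shapefile
# layer called "polygons", storing the pixel value in a field named "DN":
#
#     python gdal_polygonize.py -8 -f "ESRI Shapefile" input.tif output.shp polygons DN
#
# The same with an explicit validity mask instead of the band's own mask band:
#
#     python gdal_polygonize.py -mask valid_mask.tif input.tif -f "ESRI Shapefile" output.shp polygons DN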
| gpl-2.0 | -8,949,319,689,744,370,000 | 28.274775 | 99 | 0.511002 | false |
3ffusi0on/Addicted-to-XDCC | Addict7ed.py | 1 | 3211 | #!/usr/bin/env python3.4
import sys
from PyQt4 import QtGui
import re
import os, sys
import subprocess
import urllib.request
import urllib.error
import hashlib
#TODO
#-input for the link of xdcc server
#-dl button ? or automatize the action
#- /!\ Configuration file /!\
def get_hash(name):
readsize = 64 * 1024
with open(name, 'rb') as f:
size = os.path.getsize(name)
data = f.read(readsize)
f.seek(-readsize, os.SEEK_END)
data += f.read(readsize)
return hashlib.md5(data).hexdigest()
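# Quick sanity check of the hashing scheme (illustrative; the path and digest below are made up):
# SubDB identifies a video by the MD5 of its first and last 64 KiB, so for any file larger
# than 128 KiB something like
#     print(get_hash('/path/to/episode.mkv'))   # -> 'ffd8d4aa68033dc03d1c8ef373b9028c'
# should print a 32-character hex digest that both upload and download requests reuse.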
class UI(QtGui.QWidget):
def __init__(self):
super(UI, self).__init__()
self.initUI()
def initUI(self):
self.setGeometry(20, 40, 300, 120)
self.setWindowTitle('Addict7ed-to-Xdcc')
self.link = QtGui.QLineEdit()
#TODO make it like a promt
self.link.setText("Xdcc link...")
#xdcc file download button
downloadMovieButton = QtGui.QPushButton('Get movie')
downloadMovieButton.resize(downloadMovieButton.sizeHint())
downloadMovieButton.clicked.connect(self.downloadXdccFile)
#pick file button
pickButton = QtGui.QPushButton('Open...')
pickButton.resize(pickButton.sizeHint())
pickButton.clicked.connect(self.selectFile)
#selected file
self.filename = QtGui.QLabel()
self.filename.setText("...")
#subtitle download button
downloadSubButton = QtGui.QPushButton('Get Subtitle')
downloadSubButton.resize(downloadSubButton.sizeHint())
downloadSubButton.clicked.connect(self.downloadSubtitle)
## Layouts
vbox = QtGui.QVBoxLayout()
vbox.addStretch(1)
vbox.addWidget(self.link)
vbox.addWidget(downloadMovieButton)
vbox.addWidget(pickButton)
vbox.addWidget(self.filename)
vbox.addWidget(downloadSubButton)
self.setLayout(vbox)
self.show()
def selectFile(self):
self.filename.setText(QtGui.QFileDialog.getOpenFileName())
print(self.filename.text())
def downloadXdccFile(self):
print("TODO")
def downloadSubtitle(self):
filename = self.filename.text()
track_hash = get_hash(filename)
headers = { 'User-Agent' : 'SubDB/1.0 (Addict7ed-to-Xdcc/1.0; http://github.com/3ffusi0on/Addict7ed-to-Xdcc)' }
url = "http://api.thesubdb.com/?action=download&hash=" + track_hash + "&language=en"
try:
request = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(request).read()
print(response)
            #Saving the subtitle file next to the video, swapping the extension to .srt
            dest_file = os.path.splitext(filename)[0] + '.srt'
print("Saving subtitle as :" + dest_file)
subtitle_file = open(dest_file, 'wb')
subtitle_file.write(response)
subtitle_file.close()
except urllib.error.HTTPError as e:
#TODO check error (missing subtitle on server)
if e.code == 404:
print("404 Not Found: No subtitle available for the movie")
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
ui = UI()
sys.exit(app.exec_())
| gpl-2.0 | -566,545,068,900,936,960 | 29.875 | 119 | 0.624416 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_12_01/operations/_express_route_service_providers_operations.py | 1 | 5203 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteServiceProvidersOperations(object):
"""ExpressRouteServiceProvidersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRouteServiceProviderListResult"]
"""Gets all the available express route service providers.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteServiceProviderListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_12_01.models.ExpressRouteServiceProviderListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteServiceProviderListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteServiceProviderListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders'} # type: ignore
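# --- Usage sketch (assumption; not part of the generated client) ---
# This operations class is normally reached through NetworkManagementClient rather than
# instantiated directly. A minimal listing loop could look like this (the credential and
# subscription id are placeholders):
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     for provider in client.express_route_service_providers.list():
#         print(provider.name, provider.peering_locations)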
| mit | 8,445,812,758,290,213,000 | 45.044248 | 135 | 0.648472 | false |
Firefly-Automation/Firefly | Firefly/automation/door_motion_triggered_lights/metadata.py | 1 | 1710 | AUTHOR = 'Zachary Priddy. ([email protected])'
TITLE = 'Motion/Door Triggered Lights'
METADATA = {
'title': TITLE,
'author': AUTHOR,
'commands': ['execute'],
'interface': {
'sensors': {
'motion': {
'context': 'motion sensors to use to trigger light',
'filter': {
'motion': True
},
'type': 'deviceList'
},
'door': {
'context': 'door sensors to trigger lights',
'filter': {
'contact': True
},
'type': 'deviceList'
}
},
'lights': {
"light": {
'context': 'lights to turn on/off',
'type': 'deviceList',
'filter': {
'deviceType': ['light', 'switch']
}
},
},
'commands': {
"on": {
'context': 'command to send to lights to turn on',
'type': 'command',
'filter': {}
},
"off": {
'context': 'command to send to lights on to turn off',
'type': 'command',
'filter': {}
}
},
'actions': {
'on': {
'context': 'This is auto generated'
},
'off': {
'context': 'This is auto generated'
}
},
'custom': {
'custom_actions': {
'context': 'set to true if using custom action',
'type': 'bool'
}
},
'conditions': {
"on": {
'context': 'condition for turning lights on'
},
'off': {
'context': 'condition for turning lights off'
}
},
'delays': {
'off': {
'context': 'Time to delay before turning all lights off (seconds)',
'type': 'number'
}
}
}
}
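# --- Illustrative automation config (assumption; key names mirror the interface above) ---
# A user-supplied configuration that fills in this interface might look roughly like:
#
#     EXAMPLE_CONFIG = {
#         'sensors': {'motion': ['ffid_motion_hall'], 'door': ['ffid_front_door']},
#         'lights': {'light': ['ffid_hall_light']},
#         'commands': {'on': 'on', 'off': 'off'},
#         'delays': {'off': 300},
#     }
#
# The ffid_* device ids above are made up for illustration.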
| apache-2.0 | -654,213,190,019,018,900 | 22.108108 | 75 | 0.433918 | false |
fp12/achallonge | challonge/enums.py | 1 | 1529 | from enum import Enum
class TournamentState(Enum):
""" State a tournament can be in """
pending = 'pending'
open_ = 'open' #: can't use `open`
complete = 'complete'
in_progress = 'in progress'
class TournamentType(Enum):
""" Type of a tournament """
single_elimination = 'single elimination'
double_elimination = 'double elimination'
round_robin = 'round robin'
swiss = 'swiss'
class TournamentStateResult(Enum):
""" State given from the Challonge API.
Can be different from :class:`TournamentState`
"""
underway = 0
pending = 1
class DoubleEliminationEnding(Enum):
""" Type of ending for double elimination tournaments """
default = None #: give the winners bracket finalist two chances to beat the losers bracket finalist
single_match = 'single_match' #: create only one grand finals match
no_grand_finals = 'skip' #: don't create a finals match between winners and losers bracket finalists
class RankingOrder(Enum):
""" Order the ranking should be built upon """
match_wins = 'match wins'
game_wins = 'game wins'
points_scored = 'points scored'
points_difference = 'points difference'
custom = 'custom'
class Pairing(Enum):
""" Method of participant pairing when building matches """
seeds = 0
sequential = 1
class MatchState(Enum):
""" State a match can be in """
all_ = 'all' #: can't use `all`
open_ = 'open' #: can't use `open`
pending = 'pending'
complete = 'complete'
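# Example (illustrative): these enums are passed to the library's API calls and compared
# against values coming back from Challonge, e.g.
#
#     >>> TournamentType.double_elimination.value
#     'double elimination'
#     >>> MatchState('open') is MatchState.open_
#     True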
| mit | 7,507,871,851,981,742,000 | 26.8 | 105 | 0.657292 | false |
BhallaLab/moose | moose-gui/plugins/kkitOrdinateUtil.py | 1 | 19921 | __author__ = "HarshaRani"
__credits__ = ["Upi Lab"]
__license__ = "GPL3"
__version__ = "1.0.0"
__maintainer__ = "HarshaRani"
__email__ = "[email protected]"
__status__ = "Development"
__updated__ = "Oct 26 2018"
'''
2018
Oct 26: xfer molecules are not put on the screen
Sep 28: to zoom the kkit co-ordinates, a factor of w=1000 and h=800 is multiplied here
2017
Oct 18: moved some functions to kkitUtil;
        getxyCord, etc. functions are added
'''
import collections
from moose import *
import numpy as np
from moose import wildcardFind,element,PoolBase,CplxEnzBase,Annotator,exists
from networkx.drawing.nx_agraph import graphviz_layout
import numpy as np
import networkx as nx
from kkitUtil import getRandColor,colorCheck,findCompartment, findGroup, findGroup_compt, mooseIsInstance
from PyQt4.QtGui import QColor
import re
import moose._moose as moose
def getxyCord(xcord,ycord,list1):
for item in list1:
# if isinstance(item,Function):
# objInfo = element(item.parent).path+'/info'
# else:
# objInfo = item.path+'/info'
if not isinstance(item,Function):
objInfo = item.path+'/info'
xcord.append(xyPosition(objInfo,'x'))
ycord.append(xyPosition(objInfo,'y'))
def xyPosition(objInfo,xory):
try:
return(float(element(objInfo).getField(xory)))
except ValueError:
return (float(0))
'''
def mooseIsInstance(melement, classNames):
return element(melement).__class__.__name__ in classNames
def findCompartment(melement):
while not mooseIsInstance(melement, ["CubeMesh", "CyclMesh"]):
melement = melement.parent
return melement
def findGroup(melement):
while not mooseIsInstance(melement, ["Neutral"]):
melement = melement.parent
return melement
def findGroup_compt(melement):
while not (mooseIsInstance(melement, ["Neutral","CubeMesh", "CyclMesh"])):
melement = melement.parent
return melement
'''
def populateMeshEntry(meshEntry,parent,types,obj):
#print " parent ",parent, "types ",types, " obj ",obj
try:
value = meshEntry[element(parent.path)][types]
except KeyError:
# Key is not present
meshEntry[element(parent.path)].update({types :[element(obj)]})
else:
mlist = meshEntry[element(parent.path)][types]
mlist.append(element(obj))
def updateMeshObj(modelRoot):
print " updateMeshObj "
meshEntry = {}
if meshEntry:
meshEntry.clear()
else:
meshEntry = {}
objPar = collections.OrderedDict()
for compt in wildcardFind(modelRoot+'/##[ISA=ChemCompt]'):
groupColor = []
try:
value = meshEntry[element(compt)]
except KeyError:
# Compt is not present
meshEntry[element(compt)] = {}
objPar[element(compt)] = element('/')
for grp in wildcardFind(compt.path+'/##[TYPE=Neutral]'):
test = [x for x in wildcardFind(element(grp).path+'/#') if x.className in ["Pool","Reac","Enz"]]
grp_cmpt = findGroup_compt(grp.parent)
try:
value = meshEntry[element(grp)]
except KeyError:
# Grp is not present
meshEntry[element(grp)] = {}
objPar[element(grp)] = element(grp_cmpt)
for compt in wildcardFind(modelRoot+'/##[ISA=ChemCompt]'):
for m in wildcardFind(compt.path+'/##[ISA=PoolBase]'):
grp_cmpt = findGroup_compt(m)
if isinstance(element(grp_cmpt),Neutral):
if isinstance(element(m.parent),EnzBase):
populateMeshEntry(meshEntry,grp_cmpt,"cplx",m)
else:
populateMeshEntry(meshEntry,grp_cmpt,"pool",m)
else:
if isinstance(element(m.parent),EnzBase):
populateMeshEntry(meshEntry,compt,"cplx",m)
else:
populateMeshEntry(meshEntry,compt,"pool",m)
for r in wildcardFind(compt.path+'/##[ISA=ReacBase]'):
rgrp_cmpt = findGroup_compt(r)
if isinstance(element(rgrp_cmpt),Neutral):
populateMeshEntry(meshEntry,rgrp_cmpt,"reaction",r)
else:
populateMeshEntry(meshEntry,compt,"reaction",r)
for e in wildcardFind(compt.path+'/##[ISA=EnzBase]'):
egrp_cmpt = findGroup_compt(e)
if isinstance(element(egrp_cmpt),Neutral):
populateMeshEntry(meshEntry,egrp_cmpt,"enzyme",e)
else:
populateMeshEntry(meshEntry,compt,"enzyme",e)
for f in wildcardFind(compt.path+'/##[ISA=Function]'):
fgrp_cmpt = findGroup_compt(f)
if isinstance(element(fgrp_cmpt),Neutral):
populateMeshEntry(meshEntry,fgrp_cmpt,"function",f)
else:
populateMeshEntry(meshEntry,compt,"function",f)
for t in wildcardFind(compt.path+'/##[ISA=StimulusTable]'):
tgrp_cmpt = findGroup_compt(t)
if isinstance(element(tgrp_cmpt),Neutral):
populateMeshEntry(meshEntry,tgrp_cmpt,"stimTab",t)
else:
populateMeshEntry(meshEntry,compt,"stimTab",t)
return(objPar,meshEntry)
def setupMeshObj(modelRoot):
    ''' Setup compartment and its members pool, reaction, enz, cplx under the meshEntry dictionary.
    meshEntry uses the compartment as "key";
    the value is key2:list where key2 is the moose object type and the list holds objects of that particular type,
    e.g. meshEntry[meshEnt] = { 'reaction': reaction_list, 'enzyme': enzyme_list, 'pool': poollist, 'cplx': cplxlist }
'''
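    # Illustrative shape of the returned structures (hypothetical element paths,
    # added here only for orientation -- not taken from any real model):
    #   meshEntry[<ChemCompt '/model/kinetics'>] = {
    #       'pool': [...], 'reaction': [...], 'enzyme': [...],
    #       'cplx': [...], 'function': [...], 'stimTab': [...]}
    #   objPar maps each group/compartment element to its parent compartment ('/' at the top level).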
xmin = 0.0
xmax = 1.0
ymin = 0.0
ymax = 1.0
positionInfoExist = True
meshEntry = {}
if meshEntry:
meshEntry.clear()
else:
meshEntry = {}
xcord = []
ycord = []
n = 1
objPar = collections.OrderedDict()
for compt in wildcardFind(modelRoot+'/##[ISA=ChemCompt]'):
groupColor = []
try:
value = meshEntry[element(compt)]
except KeyError:
# Compt is not present
meshEntry[element(compt)] = {}
objPar[element(compt)] = element('/')
for grp in wildcardFind(compt.path+'/##[TYPE=Neutral]'):
test = [x for x in wildcardFind(element(grp).path+'/#') if x.className in ["Pool","Reac","Enz"]]
#if len(test) >1:
grpinfo = Annotator(element(grp).path+'/info')
validatecolor = colorCheck(grpinfo.color,"bg")
validatedgrpcolor = str(QColor(validatecolor).name())
groupColor.append(validatedgrpcolor)
grp_cmpt = findGroup_compt(grp.parent)
try:
value = meshEntry[element(grp)]
except KeyError:
# Grp is not present
meshEntry[element(grp)] = {}
objPar[element(grp)] = element(grp_cmpt)
# if n > 1:
# validatecolor = colorCheck(grpinfo.color,"bg")
# validatedgrpcolor = str(QColor(validatecolor).name())
# if validatedgrpcolor in groupColor:
# print " inside "
# c = getRandColor()
# print " c ",c, c.name()
# grpinfo.color = str(c.name())
# groupColor.append(str(c.name()))
# print " groupColor ",grpinfo,grpinfo.color, groupColor
# n =n +1
for compt in wildcardFind(modelRoot+'/##[ISA=ChemCompt]'):
for m in wildcardFind(compt.path+'/##[ISA=PoolBase]'):
if not re.search("xfer",m.name):
grp_cmpt = findGroup_compt(m)
xcord.append(xyPosition(m.path+'/info','x'))
ycord.append(xyPosition(m.path+'/info','y'))
if isinstance(element(grp_cmpt),Neutral):
if isinstance(element(m.parent),EnzBase):
populateMeshEntry(meshEntry,grp_cmpt,"cplx",m)
else:
populateMeshEntry(meshEntry,grp_cmpt,"pool",m)
else:
if isinstance(element(m.parent),EnzBase):
populateMeshEntry(meshEntry,compt,"cplx",m)
else:
populateMeshEntry(meshEntry,compt,"pool",m)
for r in wildcardFind(compt.path+'/##[ISA=ReacBase]'):
rgrp_cmpt = findGroup_compt(r)
xcord.append(xyPosition(r.path+'/info','x'))
ycord.append(xyPosition(r.path+'/info','y'))
if isinstance(element(rgrp_cmpt),Neutral):
populateMeshEntry(meshEntry,rgrp_cmpt,"reaction",r)
else:
populateMeshEntry(meshEntry,compt,"reaction",r)
for e in wildcardFind(compt.path+'/##[ISA=EnzBase]'):
egrp_cmpt = findGroup_compt(e)
xcord.append(xyPosition(e.path+'/info','x'))
ycord.append(xyPosition(e.path+'/info','y'))
if isinstance(element(egrp_cmpt),Neutral):
populateMeshEntry(meshEntry,egrp_cmpt,"enzyme",e)
else:
populateMeshEntry(meshEntry,compt,"enzyme",e)
for f in wildcardFind(compt.path+'/##[ISA=Function]'):
fgrp_cmpt = findGroup_compt(f)
if isinstance(element(fgrp_cmpt),Neutral):
populateMeshEntry(meshEntry,fgrp_cmpt,"function",f)
else:
populateMeshEntry(meshEntry,compt,"function",f)
for t in wildcardFind(compt.path+'/##[ISA=StimulusTable]'):
tgrp_cmpt = findGroup_compt(t)
xcord.append(xyPosition(t.path+'/info','x'))
ycord.append(xyPosition(t.path+'/info','y'))
if isinstance(element(tgrp_cmpt),Neutral):
populateMeshEntry(meshEntry,tgrp_cmpt,"stimTab",t)
else:
populateMeshEntry(meshEntry,compt,"stimTab",t)
xmin = min(xcord)
xmax = max(xcord)
ymin = min(ycord)
ymax = max(ycord)
positionInfoExist = not(len(np.nonzero(xcord)[0]) == 0 and len(np.nonzero(ycord)[0]) == 0)
return(objPar,meshEntry,xmin,xmax,ymin,ymax,positionInfoExist)
'''
def setupMeshObj(modelRoot):
# Setup compartment and its members pool,reaction,enz cplx under self.meshEntry dictionaries \
# self.meshEntry with "key" as compartment,
# value is key2:list where key2 represents moose object type,list of objects of a perticular type
# e.g self.meshEntry[meshEnt] = { 'reaction': reaction_list,'enzyme':enzyme_list,'pool':poollist,'cplx': cplxlist }
meshEntry = {}
if meshEntry:
meshEntry.clear()
else:
meshEntry = {}
xcord = []
ycord = []
meshEntryWildcard = '/##[ISA=ChemCompt]'
if modelRoot != '/':
meshEntryWildcard = modelRoot+meshEntryWildcard
for meshEnt in wildcardFind(meshEntryWildcard):
mollist = []
realist = []
enzlist = []
cplxlist = []
tablist = []
funclist = []
mol_cpl = wildcardFind(meshEnt.path+'/##[ISA=PoolBase]')
funclist = wildcardFind(meshEnt.path+'/##[ISA=Function]')
enzlist = wildcardFind(meshEnt.path+'/##[ISA=EnzBase]')
realist = wildcardFind(meshEnt.path+'/##[ISA=ReacBase]')
tablist = wildcardFind(meshEnt.path+'/##[ISA=StimulusTable]')
if mol_cpl or funclist or enzlist or realist or tablist:
for m in mol_cpl:
if isinstance(element(m.parent),CplxEnzBase):
cplxlist.append(m)
elif isinstance(element(m),moose.PoolBase):
mollist.append(m)
meshEntry[meshEnt] = {'enzyme':enzlist,
'reaction':realist,
'pool':mollist,
'cplx':cplxlist,
'table':tablist,
'function':funclist
}
for mert in [mollist,enzlist,realist,tablist]:
for merts in mert:
objInfo = merts.path+'/info'
if exists(objInfo):
xcord.append(element(objInfo).x)
ycord.append(element(objInfo).y)
return(meshEntry,xcord,ycord)
def sizeHint(self):
return QtCore.QSize(800,400)
'''
def setupItem(modelPath,cntDict):
# This function collects information of what is connected to what. \
# eg. substrate and product connectivity to reaction's and enzyme's \
# sumtotal connectivity to its pool are collected
#print " setupItem"
sublist = []
prdlist = []
zombieType = ['ReacBase','EnzBase','Function','StimulusTable']
for baseObj in zombieType:
path = '/##[ISA='+baseObj+']'
if modelPath != '/':
path = modelPath+path
if ( (baseObj == 'ReacBase') or (baseObj == 'EnzBase')):
for items in wildcardFind(path):
sublist = []
prdlist = []
uniqItem,countuniqItem = countitems(items,'subOut')
subNo = uniqItem
for sub in uniqItem:
sublist.append((element(sub),'s',countuniqItem[sub]))
uniqItem,countuniqItem = countitems(items,'prd')
prdNo = uniqItem
if (len(subNo) == 0 or len(prdNo) == 0):
print ("Substrate Product is empty ",path, " ",items)
for prd in uniqItem:
prdlist.append((element(prd),'p',countuniqItem[prd]))
if (baseObj == 'CplxEnzBase') :
uniqItem,countuniqItem = countitems(items,'toEnz')
for enzpar in uniqItem:
sublist.append((element(enzpar),'t',countuniqItem[enzpar]))
uniqItem,countuniqItem = countitems(items,'cplxDest')
for cplx in uniqItem:
prdlist.append((element(cplx),'cplx',countuniqItem[cplx]))
if (baseObj == 'EnzBase'):
uniqItem,countuniqItem = countitems(items,'enzDest')
for enzpar in uniqItem:
sublist.append((element(enzpar),'t',countuniqItem[enzpar]))
cntDict[items] = sublist,prdlist
elif baseObj == 'Function':
for items in wildcardFind(path):
sublist = []
prdlist = []
item = items.path+'/x[0]'
uniqItem,countuniqItem = countitems(item,'input')
for funcpar in uniqItem:
sublist.append((element(funcpar),'sts',countuniqItem[funcpar]))
uniqItem,countuniqItem = countitems(items,'valueOut')
for funcpar in uniqItem:
prdlist.append((element(funcpar),'stp',countuniqItem[funcpar]))
cntDict[items] = sublist,prdlist
else:
for tab in wildcardFind(path):
tablist = []
uniqItem,countuniqItem = countitems(tab,'output')
for tabconnect in uniqItem:
tablist.append((element(tabconnect),'tab',countuniqItem[tabconnect]))
cntDict[tab] = tablist
def countitems(mitems,objtype):
items = []
items = element(mitems).neighbors[objtype]
uniqItems = set(items)
#countuniqItemsauto = Counter(items)
countuniqItems = dict((i, items.count(i)) for i in items)
return(uniqItems,countuniqItems)
def recalculatecoordinatesforKkit(mObjlist,xcord,ycord):
positionInfoExist = not(len(np.nonzero(xcord)[0]) == 0 \
and len(np.nonzero(ycord)[0]) == 0)
if positionInfoExist:
#Here all the object has been taken now recalculate and reassign back x and y co-ordinates
xmin = min(xcord)
xmax = max(xcord)
ymin = min(ycord)
ymax = max(ycord)
for merts in mObjlist:
objInfo = merts.path+'/info'
if moose.exists(objInfo):
Ix = (xyPosition(objInfo,'x')-xmin)/(xmax-xmin)
Iy = (ymin-xyPosition(objInfo,'y'))/(ymax-ymin)
element(objInfo).x = Ix*1000
element(objInfo).y = Iy*800
def xyPosition(objInfo,xory):
try:
return(float(element(objInfo).getField(xory)))
except ValueError:
return (float(0))
def autoCoordinates(meshEntry,srcdesConnection):
G = nx.Graph()
for cmpt,memb in meshEntry.items():
if memb in ["enzyme"]:
for enzObj in find_index(memb,'enzyme'):
#G.add_node(enzObj.path)
G.add_node(enzObj.path,label='',shape='ellipse',color='',style='filled',fontname='Helvetica',fontsize=12,fontcolor='blue')
for cmpt,memb in meshEntry.items():
#if memb.has_key
if memb in ["pool","cplx","reaction"]:
for poolObj in find_index(memb,'pool'):
#G.add_node(poolObj.path)
G.add_node(poolObj.path,label = poolObj.name,shape = 'box',color = '',style = 'filled',fontname = 'Helvetica',fontsize = 9,fontcolor = 'blue')
for cplxObj in find_index(memb,'cplx'):
G.add_node(cplxObj.path)
G.add_node(cplxObj.path,label = cplxObj.name,shape = 'box',color = '',style = 'filled',fontname = 'Helvetica',fontsize = 12,fontcolor = 'blue')
#G.add_edge((cplxObj.parent).path,cplxObj.path)
for reaObj in find_index(memb,'reaction'):
#G.add_node(reaObj.path)
G.add_node(reaObj.path,label='',shape='circle',color='')
for inn,out in srcdesConnection.items():
if (inn.className =='ZombieReac'): arrowcolor = 'green'
elif(inn.className =='ZombieEnz'): arrowcolor = 'red'
else: arrowcolor = 'blue'
if isinstance(out,tuple):
if len(out[0])== 0:
print (inn.className + ':' +inn.name + " doesn't have input message")
else:
for items in (items for items in out[0] ):
G.add_edge(element(items[0]).path,inn.path)
if len(out[1]) == 0:
print (inn.className + ':' + inn.name + "doesn't have output mssg")
else:
for items in (items for items in out[1] ):
G.add_edge(inn.path,element(items[0]).path)
elif isinstance(out,list):
if len(out) == 0:
print ("Func pool doesn't have sumtotal")
else:
for items in (items for items in out ):
G.add_edge(element(items[0]).path,inn.path)
position = graphviz_layout(G)
xcord, ycord = [],[]
for item in position.items():
xy = item[1]
xroundoff = round(xy[0],0)
yroundoff = round(xy[1],0)
xcord.append(xroundoff)
ycord.append(yroundoff)
xmin = min(xcord)
xmax = max(xcord)
ymin = min(ycord)
ymax = max(ycord)
for item in position.items():
xy = item[1]
anno = Annotator(item[0]+'/info')
Ax = (xy[0]-xmin)/(xmax-xmin)
Ay = (xy[1]-ymin)/(ymax-ymin)
#anno.x = round(Ax,1)
#anno.y = round(Ay,1)
#not roundingoff to max and min the co-ordinates for bigger model would overlay the co-ordinates
anno.x = xy[0]
anno.y = xy[1]
def find_index(value, key):
""" Value.get(key) to avoid expection which would raise if empty value in dictionary for a given key """
if value.get(key) != None:
return value.get(key)
else:
raise ValueError('no dict with the key found')
| gpl-3.0 | -3,179,577,821,720,425,000 | 39.821721 | 159 | 0.560765 | false |
shigin/radist | radist/content.py | 1 | 10660 | """This module contains methods to handle various URI schemes."""
import os, sys
import re
import itertools
import urllib2
import ftplib
import socket
import netrc
from warnings import warn
from radist.helpers import R_SUB2
scheme = re.compile('(?P<scheme>[a-z]+)://(?P<host>[a-z:0-9.]+)/(?P<path>.*)')
localhost = socket.gethostbyname(socket.gethostname())
def auth_from_wget(filename='~/.wgetrc'):
DUSER = 'anonymous'
DPASS = 'radist@rambler'
try:
wgetrc = open(os.path.expanduser(filename), 'r')
except IOError:
return DUSER, DPASS
d = {}
for line in wgetrc:
try:
key, val = line.split('#')[0].strip().split('=')
d[key] = val
except:
# who matter
pass
return d.get('user', DUSER), d.get('passwd', DPASS)
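# A ~/.wgetrc fragment the simple parser above understands (illustrative values;
# note it expects no spaces around '='):
#   user=alice
#   passwd=s3cret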
def get_auth(host):
info = get_auth.netrc.authenticators(host)
if info:
host, account, passwd = info
return host, passwd
else:
return get_auth.default
class FakeNetRC:
    # stands in for netrc.netrc when no ~/.netrc is available; it must expose the
    # same authenticators() method that get_auth() calls on the real netrc object
    def authenticators(self, host):
        return None
try:
get_auth.netrc = netrc.netrc()
except IOError:
get_auth.netrc = FakeNetRC()
get_auth.default = auth_from_wget()
try:
from functools import partial
except ImportError:
def decorator_helper(func, wrapper):
"Sets doc, module, name and dict of wrapper from func."
wrapper.__doc__ = func.__doc__
wrapper.__module__ = func.__module__
try:
# builtin functions hasn't got __dict__
wrapper.__dict__.update(func.__dict__)
except:
pass
try:
# xml rpc function hasn't got __name__
wrapper.__name__ = func.__name__
except:
pass
    def partial(func, *cargs, **ckwargs):
def nested(*args, **kwargs):
if kwargs or ckwargs:
dict = ckwargs.copy()
dict.update(kwargs)
else:
dict = ckwargs or kwargs
return func(*(cargs + args), **dict)
decorator_helper(func, nested)
return nested
class FileSocketProxy(object):
def __init__(self, socket):
self.__socket = socket
def __getattr__(self, prop):
attr = getattr(self.__socket, prop)
return attr
def write(self, str):
return self.__socket.send(str)
__all__ = ['get_file', 'adv_get_content', 'get_line_iterator', 'file_fabric']
class Content(object):
"""Abstract class for get_class fabric.
Childs of the class should returns True if can handle name.
Method get_content returns object with readlines and read methods.
"""
@staticmethod
def is_me(name):
"""Returns True if can handle name."""
return False
@staticmethod
def can_target(name):
"""Returns True if can create 'name' file"""
return False
@staticmethod
def get_host(name):
"""Returns a name of the host for 'name'."""
raise NotImplementedError('derived class should overload me')
@staticmethod
def get_file_write(name):
"Returns file--like object, which can write to file."
raise NotImplementedError('derived class should overload me')
@staticmethod
def get_content(name):
"""Returns object with readlines and read methods.
Usually it's 'file' object."""
raise NotImplementedError('derived class should overload me')
@staticmethod
def remote_get(name, src):
"""Copy file from src to remote target name."""
raise NotImplementedError('derived class should overload me')
class StdIn(Content):
"Represents stdin."
@staticmethod
def is_me(name):
return name in (None, '', '-')
@staticmethod
def get_content(name):
return sys.stdin
@staticmethod
def get_host(name):
return socket.gethostbyname(socket.gethostname())
class LocalFile(Content):
"Represents local file."
@staticmethod
def is_me(name):
return name.startswith('/') or \
name.startswith('file://') or \
name.startswith('~') or \
os.path.isfile(name)
@staticmethod
def can_target(name):
return True
@staticmethod
def get_file_write(name):
"Returns file--like object, which can write to file."
return open(name, 'w')
@staticmethod
def get_host(name):
return socket.gethostbyname(socket.gethostname())
@staticmethod
def get_content(name):
return open(os.path.expanduser(name), 'r')
class URLFile(Content):
"Files which can be accessed with urllib"
cache = {}
@staticmethod
def is_me(name):
return name.startswith('http://') or \
name.startswith('ftp://') or \
name.startswith('https://')
@staticmethod
def can_target(name):
return name.startswith('ftp://')
@staticmethod
def get_host(name):
parsed = scheme.match(name)
assert parsed
return parsed.groupdict()['host']
@staticmethod
def get_content(name):
return urllib2.urlopen(name)
@staticmethod
def get_path(name):
x = scheme.match(name)
if x:
return x.groupdict()['path']
else:
raise ValueError("can't match URI '%s'" % name)
@staticmethod
def get_file_write(name):
assert URLFile.can_target(name)
host = URLFile.get_host(name)
user, passwd = get_auth(host)
ftp = URLFile.cache.get('host', ftplib.FTP(host, user, passwd))
ftp.voidcmd('TYPE I')
conn = ftp.transfercmd('STOR ' + URLFile.get_path(name))
# ftp.voidresp()
return FileSocketProxy(conn)
class SVNFile(URLFile):
"Represents SVN file. It silly and should be removed!"
@staticmethod
def is_me(name):
return name.startswith('svn://')
@staticmethod
def get_content(name):
return urllib2.urlopen(name.replace('svn://', 'http://'))
class RadistFile(Content):
"Represent radist location"
@staticmethod
def is_me(name):
return name.startswith('ra://')
@staticmethod
def can_target(name):
return True
@staticmethod
def get_host(name):
node, path = RadistFile.get_tuple(name)
return socket.gethostbyname(node.get('server'))
@staticmethod
def get_scheme(scheme):
import default
schemes = {
'ix': default.get_ix,
'r': default.get_r,
}
if scheme not in schemes:
raise ValueError("don't know path '%s'" % scheme)
if not hasattr(RadistFile, '__' + scheme):
setattr(RadistFile, '__' + scheme, schemes[scheme]())
return getattr(RadistFile, '__' + scheme)
@staticmethod
def get_tuple(name):
"Returns tuple (RadistNode, path)"
sch, xname = name.split('://', 1)
assert sch == 'ra'
server, path = xname.split('/', 1)
if ':' in server:
raise TypeError("can't work with parameters")
scheme, rest = server.split('.', 1)
radist = RadistFile.get_scheme(scheme)
node = radist.get(rest.replace('.', '/'))
if not path.startswith('/'):
path = '%(dir)s/' + path
return node, path
@staticmethod
def get_content(name):
node, path = RadistFile.get_tuple(name)
command = "cat '%s'" % path.encode('string_escape')
stdin, stdout = node.r_popen2(command)
stdin.close()
return stdout
@staticmethod
def get_file_write(name):
"Returns file--like object, which can write to file."
node, path = RadistFile.get_tuple(name)
if RadistFile.get_host(name) == localhost:
return open(path, 'w')
else:
pid, stdin = node.r_exec("cat > " + path, flags=R_SUB2, stdout=sys.stdout)
return stdin
class FileFabric(object):
"""Class is an ease access to *File class."""
class Helper(object):
def __init__(self, class_, name):
self.__class = class_
self.__name = name
def __getattr__(self, prop):
attr = getattr(self.__class, prop)
if callable(attr):
return partial(attr, self.__name)
else:
return attr
def __init__(self):
self.__readers = []
module = sys.modules[__name__]
for class_ in dir(module):
content_obj = getattr(module, class_)
if type(content_obj) == type and issubclass(content_obj, Content):
self.__readers.append(content_obj)
def __call__(self, URI):
return self.get_wrapper(URI)
def add_reader(self, helper):
"Adds reader to query spool."
self.__readers.append(helper)
def get_class(self, URI):
"Returns class which can handle URI."
for reader in self.__readers:
if reader.is_me(URI):
return reader
raise ValueError('Unsupported URI')
def get_wrapper(self, URI):
"Returns a instance of the class with binded URI."
return FileFabric.Helper(self.get_class(URI), URI)
file_fabric = FileFabric()
def get_file(URI):
"""Opens URI and returns file object."""
return file_fabric.get_class(URI).get_content(URI)
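# Minimal usage sketch (URIs below are placeholders, not real resources):
#   text = get_file('/etc/hosts').read()                    # LocalFile
#   page = get_file('http://example.com/a.txt').read()      # URLFile via urllib2
#   data = get_file('ra://ix.some.node/file.txt').read()    # RadistFile (path taken relative to %(dir)s)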
def adv_get_content(URI=None, config=None):
"""Returns content of URI or splited by \\n config.
Example:
def parse(uri=None, config=None):
content = adv_get_content(uri, config)
"""
if URI != None:
content = get_file(URI)
elif config != None:
if isinstance(config, basestring):
content = config.split('\n')
elif hasattr(config, '__iter__'): # iterable
content = config
else:
raise exception("can't hadle config, it must be string or iterable object")
else:
raise exceptions.TypeError('config or URI must be specified')
return content
def get_line_iterator(iterable, special=[]):
"""Returns iterator over iterable.
Iterator returns lines without trailing '\\n' and without
'#' style comments.
"""
def helper(str):
"Helper str -> str"
pair = str.rstrip('\n').split('#', 1)
if special:
if len(pair) == 1:
return pair[0], None
else:
for i in special:
if pair[1].startswith(i):
return pair
return pair[0], None
else:
return pair[0]
return itertools.imap(helper, iterable)
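# Illustrative behaviour of get_line_iterator (values made up):
#   list(get_line_iterator(['a=1 # note\n']))                -> ['a=1 ']
#   list(get_line_iterator(['b=2 #!keep\n'], special=['!'])) -> [['b=2 ', '!keep']]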
| bsd-2-clause | -7,608,309,640,738,216,000 | 27.810811 | 87 | 0.57636 | false |
danakj/chromium | build/gyp_environment.py | 2 | 1214 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Sets up various automatic gyp environment variables. These are used by
gyp_chromium and landmines.py which run at different stages of runhooks. To
make sure settings are consistent between them, all setup should happen here.
"""
import gyp_helper
import mac_toolchain
import os
import sys
import vs_toolchain
def SetEnvironment():
"""Sets defaults for GYP_* variables."""
gyp_helper.apply_chromium_gyp_env()
# Default to ninja on linux and windows, but only if no generator has
# explicitly been set.
# Also default to ninja on mac, but only when not building chrome/ios.
# . -f / --format has precedence over the env var, no need to check for it
# . set the env var only if it hasn't been set yet
# . chromium.gyp_env has been applied to os.environ at this point already
if sys.platform.startswith(('linux', 'win', 'freebsd', 'darwin')) and \
not os.environ.get('GYP_GENERATORS'):
os.environ['GYP_GENERATORS'] = 'ninja'
vs_toolchain.SetEnvironmentAndGetRuntimeDllDirs()
mac_toolchain.SetToolchainEnvironment()
| bsd-3-clause | -993,198,361,419,077,900 | 36.9375 | 77 | 0.740527 | false |
strongswan/strongTNC | apps/devices/paging.py | 1 | 5001 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import math
from .models import Device, Product
from apps.core.models import Session
from apps.devices.models import Device
from apps.swid.models import Event
from apps.front.paging import ProducerFactory
# PAGING PRODUCER
device_producer_factory = ProducerFactory(Device, 'description__icontains')
product_producer_factory = ProducerFactory(Product, 'name__icontains')
def device_session_list_producer(from_idx, to_idx, filter_query, dynamic_params=None,
static_params=None):
device_id = dynamic_params['device_id']
session_list = Session.objects.filter(device=device_id)
return session_list[from_idx:to_idx]
def device_session_stat_producer(page_size, filter_query, dynamic_params=None,
static_params=None):
device_id = dynamic_params['device_id']
count = Session.objects.filter(device=device_id).count()
return math.ceil(count / page_size)
def device_event_list_producer(from_idx, to_idx, filter_query, dynamic_params=None,
static_params=None):
device_id = dynamic_params['device_id']
event_list = Event.objects.filter(device=device_id)
return event_list[from_idx:to_idx]
def device_event_stat_producer(page_size, filter_query, dynamic_params=None,
static_params=None):
device_id = dynamic_params['device_id']
count = Event.objects.filter(device=device_id).count()
return math.ceil(count / page_size)
def device_vulnerability_list_producer(from_idx, to_idx, filter_query, dynamic_params=None,
static_params=None):
device_id = dynamic_params['device_id']
device = Device.objects.get(pk=device_id)
vulnerabilities = device.get_vulnerabilities()
return vulnerabilities[from_idx:to_idx]
def device_vulnerability_stat_producer(page_size, filter_query, dynamic_params=None,
static_params=None):
device_id = dynamic_params['device_id']
device = Device.objects.get(pk=device_id)
count = device.get_vulnerabilities().count()
return math.ceil(count / page_size)
def product_device_list_producer(from_idx, to_idx, filter_query, dynamic_params=None,
static_params=None):
if not dynamic_params:
return []
product_id = dynamic_params['product_id']
return Device.objects.filter(product__id=product_id)[from_idx:to_idx]
def product_device_stat_producer(page_size, filter_query, dynamic_params=None,
static_params=None):
if not dynamic_params:
return []
product_id = dynamic_params['product_id']
count = Device.objects.filter(product__id=product_id).count()
return math.ceil(count / page_size)
# PAGING CONFIGS
device_list_paging = {
'template_name': 'front/paging/default_list',
'list_producer': device_producer_factory.list(),
'stat_producer': device_producer_factory.stat(),
'static_producer_args': None,
'var_name': 'object_list',
'url_name': 'devices:device_detail',
'page_size': 50,
}
product_list_paging = {
'template_name': 'front/paging/default_list',
'list_producer': product_producer_factory.list(),
'stat_producer': product_producer_factory.stat(),
'static_producer_args': None,
'var_name': 'object_list',
'url_name': 'devices:product_detail',
'page_size': 50,
}
product_devices_list_paging = {
'template_name': 'devices/paging/device_list',
'list_producer': product_device_list_producer,
'stat_producer': product_device_stat_producer,
'url_name': 'devices:device_detail',
'page_size': 10,
}
device_session_list_paging = {
'template_name': 'devices/paging/device_report_sessions',
'list_producer': device_session_list_producer,
'stat_producer': device_session_stat_producer,
'static_producer_args': None,
'var_name': 'sessions',
'url_name': 'devices:session_detail',
'page_size': 10,
}
device_event_list_paging = {
'template_name': 'devices/paging/device_report_events',
'list_producer': device_event_list_producer,
'stat_producer': device_event_stat_producer,
'static_producer_args': None,
'var_name': 'events',
'url_name': 'devices:event_detail',
'page_size': 10,
}
device_vulnerability_list_paging = {
'template_name': 'devices/paging/device_report_vulnerabilities',
'list_producer': device_vulnerability_list_producer,
'stat_producer': device_vulnerability_stat_producer,
'static_producer_args': None,
'var_name': 'vulnerabilities',
'url_name': None,
'page_size': 10,
}
| agpl-3.0 | -8,092,494,106,131,860,000 | 34.978417 | 92 | 0.634473 | false |
openstack/sahara | sahara/utils/files.py | 1 | 1190 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path
import pkg_resources as pkg
from sahara import version
def get_file_text(file_name, package='sahara'):
full_name = pkg.resource_filename(
package, file_name)
return open(full_name).read()
def get_file_binary(file_name):
full_name = pkg.resource_filename(
version.version_info.package, file_name)
return open(full_name, "rb").read()
def try_get_file_text(file_name, package='sahara'):
full_name = pkg.resource_filename(
package, file_name)
return (
open(full_name, "rb").read()
if path.isfile(full_name) else False)
| apache-2.0 | 3,584,808,584,476,616,000 | 28.75 | 69 | 0.708403 | false |
adviti/melange | thirdparty/google_appengine/google/storage/speckle/python/django/backend/base.py | 1 | 8172 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Django database backend for rdbms.
This acts as a simple wrapper around the MySQLdb database backend to utilize an
alternate settings.py configuration. When used in an application running on
Google App Engine, this backend will use the GAE Apiproxy as a communications
driver. When used with dev_appserver, or from outside the context of an App
Engine app, this backend will instead use a driver that communicates over the
Google API for SQL Service.
Communicating over Google API requires valid OAuth 2.0 credentials. Before
the backend can be used with this transport on dev_appserver, users should
first run the Django 'syncdb' management command (or any other of the commands
that interact with the database), and follow the instructions to obtain an
OAuth2 token and persist it to disk for subsequent use.
If you should need to manually force the selection of a particular driver
module, you can do so by specifying it in the OPTIONS section of the database
configuration in settings.py. For example:
DATABASES = {
'default': {
'ENGINE': 'google.storage.speckle.python.django.backend',
'INSTANCE': 'example.com:project:instance',
'NAME': 'mydb',
'USER': 'myusername',
'PASSWORD': 'mypassword',
'OPTIONS': {
'driver': 'google.storage.speckle.python.api.rdbms_googleapi',
}
}
}
"""
import logging
import os
import sys
from django.core import exceptions
from django.db.backends import signals
from django.utils import safestring
from google.storage.speckle.python.api import rdbms
from google.storage.speckle.python.django.backend import client
PROD_SERVER_SOFTWARE = 'Google App Engine'
modules_to_swap = (
'MySQLdb',
'MySQLdb.constants',
'MySQLdb.constants.CLIENT',
'MySQLdb.constants.FIELD_TYPE',
'MySQLdb.constants.FLAG',
'MySQLdb.converters',
)
old_modules = [(name, sys.modules.pop(name)) for name in modules_to_swap
if name in sys.modules]
sys.modules['MySQLdb'] = rdbms
try:
from google.third_party import python
python.MySQLdb = rdbms
for module_name in modules_to_swap:
module_name = 'google.third_party.python.' + module_name
old_modules.append((module_name, sys.modules.pop(module_name, None)))
sys.modules['google.third_party.python.MySQLdb'] = rdbms
except ImportError:
pass
from django.db.backends.mysql import base
for module_name, module in old_modules:
sys.modules[module_name] = module
_SETTINGS_CONNECT_ARGS = (
('HOST', 'dsn', False),
('INSTANCE', 'instance', True),
('NAME', 'database', True),
('USER', 'user', False),
('PASSWORD', 'password', False),
('OAUTH2_SECRET', 'oauth2_refresh_token', False),
('driver', 'driver_name', False),
('oauth_storage', 'oauth_storage', False),
)
def _GetDriver(driver_name=None):
"""Imports the driver module specified by the given module name.
If no name is given, this will attempt to automatically determine an
appropriate driver to use based on the current environment. When running on
a production App Engine instance, the ApiProxy driver will be used, otherwise,
the Google API driver will be used. This conveniently allows the backend to
be used with the same configuration on production, and with command line tools
like manage.py syncdb.
Args:
driver_name: The name of the driver module to import.
Returns:
The imported driver module, or None if a suitable driver can not be found.
"""
if not driver_name:
server_software = os.getenv('SERVER_SOFTWARE', '')
base_pkg_path = 'google.storage.speckle.python.api.'
if server_software.startswith(PROD_SERVER_SOFTWARE):
driver_name = base_pkg_path + 'rdbms_apiproxy'
else:
driver_name = base_pkg_path + 'rdbms_googleapi'
__import__(driver_name)
return sys.modules[driver_name]
def Connect(driver_name=None, oauth2_refresh_token=None, **kwargs):
"""Gets an appropriate connection driver, and connects with it.
Args:
driver_name: The name of the driver module to use.
oauth2_refresh_token: The OAuth2 refresh token used to aquire an access
token for authenticating requests made by the Google API driver; defaults
to the value provided by the GOOGLE_SQL_OAUTH2_REFRESH_TOKEN environment
variable, if present.
kwargs: Additional keyword arguments to pass to the driver's connect
function.
Returns:
An rdbms.Connection subclass instance.
Raises:
exceptions.ImproperlyConfigured: Valid OAuth 2.0 credentials could not be
found in storage and no oauth2_refresh_token was given.
"""
driver = _GetDriver(driver_name)
server_software = os.getenv('SERVER_SOFTWARE', '')
if server_software and driver.__name__.endswith('rdbms_googleapi'):
if server_software.startswith(PROD_SERVER_SOFTWARE):
logging.warning(
'Using the Google API driver is not recommended when running on '
'production App Engine. You should instead use the GAE API Proxy '
'driver (google.storage.speckle.python.api.rdbms_apiproxy).')
import oauth2client.client
from google.storage.speckle.python.api import rdbms_googleapi
from google.storage.speckle.python.django.backend import oauth2storage
storage = kwargs.setdefault('oauth_storage', oauth2storage.storage)
credentials = storage.get()
if credentials is None or credentials.invalid:
if not oauth2_refresh_token:
oauth2_refresh_token = os.getenv('GOOGLE_SQL_OAUTH2_REFRESH_TOKEN')
if not oauth2_refresh_token:
raise exceptions.ImproperlyConfigured(
'No valid OAuth 2.0 credentials. Before using the Google SQL '
'Service backend on dev_appserver, you must first run "manage.py '
'syncdb" and proceed through the given instructions to fetch an '
'OAuth 2.0 token.')
credentials = oauth2client.client.OAuth2Credentials(
None, rdbms_googleapi.CLIENT_ID, rdbms_googleapi.CLIENT_SECRET,
oauth2_refresh_token, None,
'https://accounts.google.com/o/oauth2/token',
rdbms_googleapi.USER_AGENT)
credentials.set_store(storage)
storage.put(credentials)
return driver.connect(**kwargs)
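# Rough sketch of what the wrapper below ends up calling for the settings.py
# example in the module docstring (see _SETTINGS_CONNECT_ARGS above and
# DatabaseWrapper._cursor below; 'conv' and 'dsn' are filled in automatically):
#   Connect(instance='example.com:project:instance', database='mydb',
#           user='myusername', password='mypassword', dsn=None, conv=...)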
class DatabaseWrapper(base.DatabaseWrapper):
"""Django DatabaseWrapper for use with rdbms.
Overrides many pieces of the MySQL DatabaseWrapper for compatibility with
the rdbms API.
"""
vendor = 'rdbms'
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.client = client.DatabaseClient(self)
def _cursor(self):
if not self._valid_connection():
kwargs = {'conv': base.django_conversions, 'dsn': None}
settings_dict = self.settings_dict
settings_dict.update(settings_dict.get('OPTIONS', {}))
for settings_key, kwarg, required in _SETTINGS_CONNECT_ARGS:
value = settings_dict.get(settings_key)
if value:
kwargs[kwarg] = value
elif required:
raise exceptions.ImproperlyConfigured(
"You must specify a '%s' for database '%s'" %
(settings_key, self.alias))
self.connection = Connect(**kwargs)
encoders = {safestring.SafeUnicode: self.connection.encoders[unicode],
safestring.SafeString: self.connection.encoders[str]}
self.connection.encoders.update(encoders)
signals.connection_created.send(sender=self.__class__, connection=self)
cursor = base.CursorWrapper(self.connection.cursor())
return cursor
| apache-2.0 | -9,148,859,095,146,350,000 | 33.05 | 80 | 0.706681 | false |
imclab/confer | server/auth.py | 1 | 12729 | import json, sys, re, hashlib, smtplib, base64, urllib, os
from django.http import *
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_exempt
from django.core.context_processors import csrf
from django.core.validators import email_re
from django.db.utils import IntegrityError
from django.utils.http import urlquote_plus
from multiprocessing import Pool
from utils import *
from models import *
p = os.path.abspath(os.path.dirname(__file__))
if(os.path.abspath(p+"/..") not in sys.path):
sys.path.append(os.path.abspath(p+"/.."))
'''
@author: Anant Bhardwaj
@date: Feb 12, 2012
'''
kLogIn = "SESSION_LOGIN"
kConf = "SESSION_CONF"
kName = "SESSION_NAME"
kFName = "SESSION_F_NAME"
kLName = "SESSION_L_NAME"
# for async calls
pool = Pool(processes=1)
'''
LOGIN/REGISTER/RESET
'''
def login_required (f):
def wrap (request, *args, **kwargs):
if kLogIn not in request.session.keys():
if(len(args)>0):
redirect_url = urlquote_plus("/%s/%s" %(args[0], f.__name__))
else:
redirect_url = "/"
return HttpResponseRedirect("/login?redirect_url=%s" %(redirect_url))
return f(request, *args, **kwargs)
wrap.__doc__ = f.__doc__
wrap.__name__ = f.__name__
return wrap
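# Usage sketch (hypothetical view function): wrapping a view with @login_required
# bounces anonymous users to /login?redirect_url=/<first_positional_arg>/<view_name>, e.g.
#   @login_required
#   def papers (request, conf):
#     ...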
def login_form (request, redirect_url='/', errors=[]):
c = {'redirect_url':redirect_url, 'errors':errors, 'values':request.REQUEST}
c.update(csrf(request))
return render_to_response('login.html', c)
def register_form (request, redirect_url='/', errors=[]):
c = {'redirect_url':redirect_url, 'errors':errors, 'values':request.REQUEST}
c.update(csrf(request))
return render_to_response('register.html', c)
def login (request):
redirect_url = '/'
if('redirect_url' in request.GET.keys()):
redirect_url = urllib.unquote_plus(request.GET['redirect_url'])
if not redirect_url or redirect_url == '':
redirect_url = '/'
if request.method == "POST":
errors = []
login_email = ''
if('redirect_url' in request.POST.keys()):
redirect_url = urllib.unquote_plus(request.POST['redirect_url'])
try:
login_email = request.POST["login_email"].lower()
login_password = hashlib.sha1(request.POST["login_password"]).hexdigest()
user = User.objects.get(email=login_email, password=login_password)
clear_session(request)
request.session[kLogIn] = user.email
request.session[kName] = user.f_name
request.session[kFName] = user.f_name
request.session[kLName] = user.l_name
return HttpResponseRedirect(redirect_url)
except User.DoesNotExist:
try:
User.objects.get(email=login_email)
errors.append(
'Wrong password. Please try again.<br /><br />'
'<a class="blue bold" href="/forgot?email=%s">Click Here</a> '
'to reset your password.' %(urllib.quote_plus(login_email)))
except User.DoesNotExist:
errors.append(
'Could not find any account associated with email address: '
'<a href="mailto:%s">%s</a>.<br /><br /><a class="blue bold" '
'href="/register?redirect_url=%s&email=%s">Click Here</a> '
'to create an account.' %(login_email, login_email,
urllib.quote_plus(redirect_url), urllib.quote_plus(login_email)))
return login_form(
request, redirect_url = urllib.quote_plus(redirect_url),
errors = errors)
except:
errors.append('Login failed.')
return login_form(
request, redirect_url = urllib.quote_plus(redirect_url),
errors = errors)
else:
return login_form(request, urllib.quote_plus(redirect_url))
def register (request):
redirect_url = '/'
if('redirect_url' in request.GET.keys()):
redirect_url = urllib.unquote_plus(request.GET['redirect_url'])
if request.method == "POST":
errors = []
email = ''
try:
error = False
if('redirect_url' in request.POST.keys()):
redirect_url = urllib.unquote_plus(request.POST['redirect_url'])
email = request.POST["email"].lower()
password = request.POST["password"]
f_name = request.POST["f_name"]
l_name = request.POST["l_name"]
if(email_re.match(email.strip()) == None):
errors.append("Invalid Email.")
error = True
if(f_name.strip() == ""):
errors.append("Empty First Name.")
error = True
if(l_name.strip() == ""):
errors.append("Empty Last Name.")
error = True
if(password == ""):
errors.append("Empty Password.")
error = True
if(error):
return register_form(request, redirect_url = urllib.quote_plus(redirect_url), errors = errors)
hashed_password = hashlib.sha1(password).hexdigest()
user = User(email=email, password=hashed_password, f_name=f_name, l_name=l_name)
user.save()
clear_session(request)
request.session[kLogIn] = user.email
request.session[kName] = user.f_name
request.session[kFName] = user.f_name
request.session[kLName] = user.l_name
encrypted_email = encrypt_text(user.email)
subject = "Welcome to Confer"
msg_body = '''
Dear %s,
Thanks for registering to Confer.
Please click the link below to start using Confer:
http://confer.csail.mit.edu/verify/%s
''' % (user.f_name + ' ' + user.l_name, encrypted_email)
pool.apply_async(send_email, [user.email, subject, msg_body])
return HttpResponseRedirect(redirect_url)
except IntegrityError:
errors.append(
'Account already exists. Please <a class="blue bold" href="/login?login_email=%s">Log In</a>.'
% (urllib.quote_plus(email)))
return register_form(request, redirect_url = urllib.quote_plus(redirect_url), errors = errors)
except:
errors.append("Some error happened while trying to create an account. Please try again.")
return register_form(request, redirect_url = urllib.quote_plus(redirect_url), errors = errors)
else:
return register_form(request, redirect_url = urllib.quote_plus(redirect_url))
def clear_session (request):
request.session.flush()
if kLogIn in request.session.keys():
del request.session[kLogIn]
if kName in request.session.keys():
del request.session[kName]
if kFName in request.session.keys():
del request.session[kFName]
if kLName in request.session.keys():
del request.session[kLName]
def logout (request):
clear_session(request)
c = {
'msg_title': 'Thank you for using Confer!',
    'msg_body': 'You have been logged out.<br /><br /><ul><li><a class= "blue bold" href="/home">Click Here</a> to browse confer as guest.<br/><br /></li><li><a class= "blue bold" href="/login">Click Here</a> to log in again.</li></ul>'
}
c.update(csrf(request))
return render_to_response('confirmation.html', c)
def forgot (request):
if request.method == "POST":
errors = []
try:
user_email = request.POST["email"].lower()
User.objects.get(email=user_email)
encrypted_email = encrypt_text(user_email)
subject = "Confer Password Reset"
msg_body = '''
Dear %s,
Please click the link below to reset your confer password:
http://confer.csail.mit.edu/reset/%s
''' % (user_email, encrypted_email)
pool.apply_async(send_email, [user_email, subject, msg_body])
c = {
'msg_title': 'Confer Reset Password',
'msg_body': 'A link to reset your password has been sent to your email address.'
}
c.update(csrf(request))
return render_to_response('confirmation.html', c)
except User.DoesNotExist:
errors.append(
"Invalid Email Address.")
except:
errors.append(
'Some unknown error happened.'
'Please try again or send an email to '
'<a href="mailto:[email protected]">[email protected]</a>.')
c = {'errors': errors, 'values': request.POST}
c.update(csrf(request))
return render_to_response('forgot.html', c)
else:
c = {'values': request.REQUEST}
c.update(csrf(request))
return render_to_response('forgot.html', c)
def verify (request, encrypted_email):
errors = []
c = {'msg_title': 'Confer Account Verification'}
try:
user_email = decrypt_text(encrypted_email)
user = User.objects.get(email=user_email)
c.update({
'msg_body': 'Thanks for verifying your email address! <a class= "blue bold" href="/home">Click Here</a> to start using Confer.'
})
clear_session(request)
request.session[kLogIn] = user.email
request.session[kName] = user.f_name
request.session[kFName] = user.f_name
request.session[kLName] = user.l_name
except:
errors.append(
'Wrong verify code in the URL. '
'Please try again or send an email to '
'<a href="mailto:[email protected]">[email protected]</a>')
c.update({'errors': errors})
c.update(csrf(request))
return render_to_response('confirmation.html', c)
def reset (request, encrypted_email):
errors = []
error = False
if request.method == "POST":
try:
user_email = request.POST["user_email"].lower()
password = request.POST["new_password"]
password2 = request.POST["new_password2"]
if password == "":
errors.append("Empty Password.")
error = True
if password2 != password:
errors.append("Password and Confirm Password don't match.")
error = True
if error:
c = {
'user_email': user_email,
'encrypted_email': encrypted_email,
'errors': errors
}
c.update(csrf(request))
return render_to_response('reset.html', c)
else:
hashed_password = hashlib.sha1(password).hexdigest()
user = User.objects.get(email=user_email)
user.password = hashed_password
user.save()
c = {
'msg_title': 'Confer Reset Password',
'msg_body': 'Your password has been changed successfully.'
}
c.update(csrf(request))
return render_to_response('confirmation.html', c)
except:
errors.append(
'Some unknown error happened. '
'Please try again or send an email to '
'<a href="mailto:[email protected]">[email protected]</a>')
c = {'errors': errors}
c.update(csrf(request))
return render_to_response('reset.html', c)
else:
try:
user_email = decrypt_text(encrypted_email)
User.objects.get(email=user_email)
c = {
'user_email': user_email,
'encrypted_email': encrypted_email
}
c.update(csrf(request))
return render_to_response('reset.html', c)
except:
errors.append(
'Wrong reset code in the URL. '
'Please try again or send an email to '
'<a href="mailto:[email protected]">[email protected]</a>')
c = {'msg_title': 'Confer Reset Password', 'errors': errors}
c.update(csrf(request))
return render_to_response('confirmation.html', c)
@login_required
def settings (request):
errors = []
error = False
redirect_url = '/'
if('redirect_url' in request.GET.keys()):
redirect_url = request.GET['redirect_url']
if request.method == "POST":
try:
if('redirect_url' in request.POST.keys()):
redirect_url = request.POST['redirect_url']
user_email = request.POST["user_email"].lower()
meetups = request.POST["meetups_enabled"]
user = User.objects.get(email=user_email)
if meetups == 'enabled':
user.meetups_enabled = True
else:
user.meetups_enabled = False
user.save()
return HttpResponseRedirect(redirect_url)
except Exception, e:
errors.append(
'Some unknown error happened. '
'Please try again or send an email to '
'<a href="mailto:[email protected]">[email protected]</a>')
c = {'errors': errors}
c.update(csrf(request))
return render_to_response('settings.html', c)
else:
login = get_login(request)
user = User.objects.get(email=login[0])
meetups_enabled = user.meetups_enabled
c = {
'user_email': login[0],
'login_id': login[0],
'login_name': login[1],
'meetups_enabled': meetups_enabled,
'redirect_url': redirect_url}
c.update(csrf(request))
return render_to_response('settings.html', c)
def get_login(request):
login_id = None
login_name = ''
try:
login_id = request.session[kLogIn]
login_name = request.session[kName]
except:
pass
return [login_id, login_name]
| mit | -7,501,670,995,491,163,000 | 30.585608 | 237 | 0.623537 | false |
ingadhoc/odoo-support | server_mode/__manifest__.py | 1 | 1354 | ##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Server Mode',
'version': '12.0.1.0.0',
"author": "ADHOC SA",
"website": "www.adhoc.com.ar",
'license': 'AGPL-3',
"category": "Extra Tools",
'sequence': 10,
'images': [],
'depends': [
"web",
"web_environment_ribbon",
"fetchmail",
],
'demo': [],
'test': [],
'installable': True,
'auto_install': True,
'application': False,
}
| lgpl-3.0 | 2,360,634,318,469,988,000 | 33.717949 | 78 | 0.559823 | false |
PuckCh/battlenet | battlenet/__init__.py | 1 | 1307 | from .connection import Connection
from .constants import UNITED_STATES
from .constants import EUROPE
from .constants import KOREA
from .constants import TAIWAN
from .constants import CHINA
from .enums import RACE
from .enums import CLASS
from .enums import QUALITY
from .enums import RACE_TO_FACTION
from .enums import EXPANSION
from .enums import RAIDS
from .exceptions import APIError
from .exceptions import CharacterNotFound
from .exceptions import GuildNotFound
from .exceptions import RealmNotFound
from .things import Thing
from .things import LazyThing
from .things import Character
from .things import Title
from .things import Reputation
from .things import Stats
from .things import Appearance
from .things import Equipment
from .things import Build
from .things import Glyph
from .things import Instance
from .things import Boss
from .things import Profession
from .things import HunterPet
from .things import Guild
from .things import Emblem
from .things import Perk
from .things import Reward
from .things import Realm
from .things import EquippedItem
from .things import Class
from .things import Race
from .things import Raid
from .utils import normalize
from .utils import make_icon_url
from .utils import make_connection
from .utils import client_id
from .utils import client_secret
| mit | 3,427,076,931,640,688,000 | 25.673469 | 41 | 0.821729 | false |
apple/llvm-project | lldb/test/API/tools/lldb-server/TestGdbRemote_qThreadStopInfo.py | 5 | 5715 | import unittest2
import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemote_qThreadStopInfo(gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
THREAD_COUNT = 5
def gather_stop_replies_via_qThreadStopInfo(self, thread_count):
# Set up the inferior args.
inferior_args = []
for i in range(thread_count - 1):
inferior_args.append("thread:new")
inferior_args.append("sleep:10")
procs = self.prep_debug_monitor_and_inferior(
inferior_args=inferior_args)
# Assumes test_sequence has anything added needed to setup the initial state.
# (Like optionally enabling QThreadsInStopReply.)
self.test_sequence.add_log_lines([
"read packet: $c#63"
], True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Give threads time to start up, then break.
time.sleep(self.DEFAULT_SLEEP)
self.reset_test_sequence()
self.test_sequence.add_log_lines(
[
"read packet: {}".format(
chr(3)),
{
"direction": "send",
"regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$",
"capture": {
1: "stop_result",
2: "key_vals_text"}},
],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Wait until all threads have started.
threads = self.wait_for_thread_count(thread_count)
self.assertIsNotNone(threads)
# On Windows, there could be more threads spawned. For example, DebugBreakProcess will
# create a new thread from the debugged process to handle an exception event. So here we
# assert 'GreaterEqual' condition.
triple = self.dbg.GetSelectedPlatform().GetTriple()
if re.match(".*-.*-windows", triple):
self.assertGreaterEqual(len(threads), thread_count)
else:
self.assertEqual(len(threads), thread_count)
# Grab stop reply for each thread via qThreadStopInfo{tid:hex}.
stop_replies = {}
thread_dicts = {}
for thread in threads:
# Run the qThreadStopInfo command.
self.reset_test_sequence()
self.test_sequence.add_log_lines(
[
"read packet: $qThreadStopInfo{:x}#00".format(thread),
{
"direction": "send",
"regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$",
"capture": {
1: "stop_result",
2: "key_vals_text"}},
],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Parse stop reply contents.
key_vals_text = context.get("key_vals_text")
self.assertIsNotNone(key_vals_text)
kv_dict = self.parse_key_val_dict(key_vals_text)
self.assertIsNotNone(kv_dict)
# Verify there is a thread and that it matches the expected thread
# id.
kv_thread = kv_dict.get("thread")
self.assertIsNotNone(kv_thread)
kv_thread_id = int(kv_thread, 16)
self.assertEqual(kv_thread_id, thread)
# Grab the stop id reported.
stop_result_text = context.get("stop_result")
self.assertIsNotNone(stop_result_text)
stop_replies[kv_thread_id] = int(stop_result_text, 16)
# Hang on to the key-val dictionary for the thread.
thread_dicts[kv_thread_id] = kv_dict
return (stop_replies, thread_dicts)
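    # Illustrative wire exchange for a single qThreadStopInfo query (packet bytes
    # are invented for this comment and do not come from a real session):
    #   -> $qThreadStopInfo1c2d#00
    #   <- $T00thread:1c2d;name:a.out;...#a3
    # "00" is captured as stop_result and the key:value text as key_vals_text.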
@skipIfNetBSD
def test_qThreadStopInfo_works_for_multiple_threads(self):
self.build()
self.set_inferior_startup_launch()
(stop_replies, _) = self.gather_stop_replies_via_qThreadStopInfo(self.THREAD_COUNT)
triple = self.dbg.GetSelectedPlatform().GetTriple()
# Consider one more thread created by calling DebugBreakProcess.
if re.match(".*-.*-windows", triple):
self.assertGreaterEqual(len(stop_replies), self.THREAD_COUNT)
else:
self.assertEqual(len(stop_replies), self.THREAD_COUNT)
@expectedFailureAll(oslist=["freebsd"], bugnumber="llvm.org/pr48418")
@expectedFailureNetBSD
def test_qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt(self):
self.build()
self.set_inferior_startup_launch()
(stop_replies, _) = self.gather_stop_replies_via_qThreadStopInfo(self.THREAD_COUNT)
self.assertIsNotNone(stop_replies)
no_stop_reason_count = sum(
1 for stop_reason in list(
stop_replies.values()) if stop_reason == 0)
with_stop_reason_count = sum(
1 for stop_reason in list(
stop_replies.values()) if stop_reason != 0)
# All but one thread should report no stop reason.
triple = self.dbg.GetSelectedPlatform().GetTriple()
# Consider one more thread created by calling DebugBreakProcess.
if re.match(".*-.*-windows", triple):
self.assertGreaterEqual(no_stop_reason_count, self.THREAD_COUNT - 1)
else:
self.assertEqual(no_stop_reason_count, self.THREAD_COUNT - 1)
        # Only one thread should indicate a stop reason.
self.assertEqual(with_stop_reason_count, 1)
| apache-2.0 | -9,150,978,879,917,957,000 | 39.531915 | 96 | 0.583552 | false |
RealTimeWeb/wikisite | MoinMoin/action/thread_monitor.py | 1 | 1932 | # -*- coding: iso-8859-1 -*-
"""
MoinMoin - Thread monitor action
Shows the current traceback of all threads.
@copyright: 2006 MoinMoin:AlexanderSchremmer
@license: GNU GPL, see COPYING for details.
"""
import os, time
from StringIO import StringIO
from MoinMoin import Page, wikiutil
from MoinMoin.util import thread_monitor
def execute_fs(pagename, request):
_ = request.getText
# check for superuser
if not request.user.isSuperUser():
request.theme.add_msg(_('You are not allowed to use this action.'), "error")
return Page.Page(request, pagename).send_page()
if thread_monitor.hook_enabled():
s = StringIO()
thread_monitor.trigger_dump(s)
time.sleep(5) # allow for all threads to dump to request
data = s.getvalue()
timestamp = time.time()
dump_fname = os.path.join(request.cfg.data_dir, "tm_%d.log" % timestamp)
f = file(dump_fname, "w")
f.write(data)
f.close()
else:
dump_fname = "nowhere"
request.write('<html><body>A dump has been saved to %s.</body></html>' % dump_fname)
def execute_wiki(pagename, request):
_ = request.getText
# be extra paranoid in dangerous actions
actname = __name__.split('.')[-1]
if not request.user.isSuperUser():
request.theme.add_msg(_('You are not allowed to use this action.'), "error")
return Page.Page(request, pagename).send_page()
request.theme.send_title("Thread monitor")
request.write('<pre>')
if not thread_monitor.hook_enabled():
request.write("Hook is not enabled.")
else:
s = StringIO()
thread_monitor.trigger_dump(s)
time.sleep(5) # allow for all threads to dump to request
request.write(wikiutil.escape(s.getvalue()))
request.write('</pre>')
request.theme.send_footer(pagename)
request.theme.send_closing_html()
execute = execute_fs
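# Usage note (illustrative, not part of the original module): as a MoinMoin
# action this is normally triggered from a page URL such as
#   .../PageName?action=thread_monitor
# and is restricted to superusers.  With execute = execute_fs the traceback
# dump is written to tm_<timestamp>.log under the wiki's data_dir rather than
# being rendered in the browser.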
| apache-2.0 | 5,275,388,841,327,920,000 | 30.16129 | 88 | 0.64234 | false |
avrem/ardupilot | libraries/AP_Terrain/tools/create_terrain.py | 1 | 11287 | #!/usr/bin/env python
'''
create ardupilot terrain database files
'''
from MAVProxy.modules.mavproxy_map import srtm
import math, struct, os, sys
import crc16, time, struct
# MAVLink sends 4x4 grids
TERRAIN_GRID_MAVLINK_SIZE = 4
# a 2k grid_block on disk contains 8x7 of the mavlink grids. Each
# grid block overlaps by one with its neighbour. This ensures that
# the altitude at any point can be calculated from a single grid
# block
TERRAIN_GRID_BLOCK_MUL_X = 7
TERRAIN_GRID_BLOCK_MUL_Y = 8
# this is the spacing between 32x28 grid blocks, in grid_spacing units
TERRAIN_GRID_BLOCK_SPACING_X = ((TERRAIN_GRID_BLOCK_MUL_X-1)*TERRAIN_GRID_MAVLINK_SIZE)
TERRAIN_GRID_BLOCK_SPACING_Y = ((TERRAIN_GRID_BLOCK_MUL_Y-1)*TERRAIN_GRID_MAVLINK_SIZE)
# giving a total grid size of a disk grid_block of 32x28
TERRAIN_GRID_BLOCK_SIZE_X = (TERRAIN_GRID_MAVLINK_SIZE*TERRAIN_GRID_BLOCK_MUL_X)
TERRAIN_GRID_BLOCK_SIZE_Y = (TERRAIN_GRID_MAVLINK_SIZE*TERRAIN_GRID_BLOCK_MUL_Y)
# format of grid on disk
TERRAIN_GRID_FORMAT_VERSION = 1
IO_BLOCK_SIZE = 2048
GRID_SPACING = 100
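# Editor's note (illustrative, derived from the constants above): each on-disk
# grid block holds TERRAIN_GRID_BLOCK_SIZE_X x TERRAIN_GRID_BLOCK_SIZE_Y
# = 28 x 32 height samples, i.e. roughly 2.8 km (north) by 3.2 km (east) at the
# default 100 m spacing.  Neighbouring blocks advance by only 24 x 28 samples
# (TERRAIN_GRID_BLOCK_SPACING_X/Y), so they overlap by one 4x4 MAVLink grid and
# any point can be interpolated from a single block.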
def to_float32(f):
'''emulate single precision float'''
return struct.unpack('f', struct.pack('f',f))[0]
LOCATION_SCALING_FACTOR = to_float32(0.011131884502145034)
LOCATION_SCALING_FACTOR_INV = to_float32(89.83204953368922)
def longitude_scale(lat):
'''get longitude scale factor'''
scale = to_float32(math.cos(to_float32(math.radians(lat))))
return max(scale, 0.01)
def get_distance_NE_e7(lat1, lon1, lat2, lon2):
'''get distance tuple between two positions in 1e7 format'''
return ((lat2 - lat1) * LOCATION_SCALING_FACTOR, (lon2 - lon1) * LOCATION_SCALING_FACTOR * longitude_scale(lat1*1.0e-7))
def add_offset(lat_e7, lon_e7, ofs_north, ofs_east):
'''add offset in meters to a position'''
dlat = int(float(ofs_north) * LOCATION_SCALING_FACTOR_INV)
dlng = int((float(ofs_east) * LOCATION_SCALING_FACTOR_INV) / longitude_scale(lat_e7*1.0e-7))
return (int(lat_e7+dlat), int(lon_e7+dlng))
def east_blocks(lat_e7, lon_e7):
'''work out how many blocks per stride on disk'''
lat2_e7 = lat_e7
lon2_e7 = lon_e7 + 10*1000*1000
# shift another two blocks east to ensure room is available
lat2_e7, lon2_e7 = add_offset(lat2_e7, lon2_e7, 0, 2*GRID_SPACING*TERRAIN_GRID_BLOCK_SIZE_Y)
offset = get_distance_NE_e7(lat_e7, lon_e7, lat2_e7, lon2_e7)
return int(offset[1] / (GRID_SPACING*TERRAIN_GRID_BLOCK_SPACING_Y))
def pos_from_file_offset(lat_degrees, lon_degrees, file_offset):
'''return a lat/lon in 1e7 format given a file offset'''
ref_lat = int(lat_degrees*10*1000*1000)
ref_lon = int(lon_degrees*10*1000*1000)
stride = east_blocks(ref_lat, ref_lon)
blocks = file_offset // IO_BLOCK_SIZE
grid_idx_x = blocks // stride
grid_idx_y = blocks % stride
idx_x = grid_idx_x * TERRAIN_GRID_BLOCK_SPACING_X
idx_y = grid_idx_y * TERRAIN_GRID_BLOCK_SPACING_Y
offset = (idx_x * GRID_SPACING, idx_y * GRID_SPACING)
(lat_e7, lon_e7) = add_offset(ref_lat, ref_lon, offset[0], offset[1])
offset = get_distance_NE_e7(ref_lat, ref_lon, lat_e7, lon_e7)
grid_idx_x = int(idx_x / TERRAIN_GRID_BLOCK_SPACING_X)
grid_idx_y = int(idx_y / TERRAIN_GRID_BLOCK_SPACING_Y)
(lat_e7, lon_e7) = add_offset(ref_lat, ref_lon,
grid_idx_x * TERRAIN_GRID_BLOCK_SPACING_X * float(GRID_SPACING),
grid_idx_y * TERRAIN_GRID_BLOCK_SPACING_Y * float(GRID_SPACING))
return (lat_e7, lon_e7)
class GridBlock(object):
def __init__(self, lat_int, lon_int, lat, lon):
'''
a grid block is a structure in a local file containing height
        information. Each grid block is 2048 bytes in size, to keep file IO
        efficient on block-oriented SD cards
'''
# crc of whole block, taken with crc=0
self.crc = 0
# format version number
self.version = TERRAIN_GRID_FORMAT_VERSION
# grid spacing in meters
self.spacing = GRID_SPACING
# heights in meters over a 32*28 grid
self.height = []
for x in range(TERRAIN_GRID_BLOCK_SIZE_X):
self.height.append([0]*TERRAIN_GRID_BLOCK_SIZE_Y)
# bitmap of 4x4 grids filled in from GCS (56 bits are used)
self.bitmap = (1<<56)-1
lat_e7 = int(lat * 1.0e7)
lon_e7 = int(lon * 1.0e7)
# grids start on integer degrees. This makes storing terrain data on
# the SD card a bit easier. Note that this relies on the python floor
# behaviour with integer division
self.lat_degrees = lat_int
self.lon_degrees = lon_int
# create reference position for this rounded degree position
ref_lat = self.lat_degrees*10*1000*1000
ref_lon = self.lon_degrees*10*1000*1000
# find offset from reference
offset = get_distance_NE_e7(ref_lat, ref_lon, lat_e7, lon_e7)
offset = (round(offset[0]), round(offset[1]))
# get indices in terms of grid_spacing elements
idx_x = int(offset[0] / GRID_SPACING)
idx_y = int(offset[1] / GRID_SPACING)
# find indexes into 32*28 grids for this degree reference. Note
# the use of TERRAIN_GRID_BLOCK_SPACING_{X,Y} which gives a one square
# overlap between grids
self.grid_idx_x = idx_x // TERRAIN_GRID_BLOCK_SPACING_X
self.grid_idx_y = idx_y // TERRAIN_GRID_BLOCK_SPACING_Y
# calculate lat/lon of SW corner of 32*28 grid_block
(ref_lat, ref_lon) = add_offset(ref_lat, ref_lon,
self.grid_idx_x * TERRAIN_GRID_BLOCK_SPACING_X * float(GRID_SPACING),
self.grid_idx_y * TERRAIN_GRID_BLOCK_SPACING_Y * float(GRID_SPACING))
self.lat = ref_lat
self.lon = ref_lon
def fill(self, gx, gy, altitude):
'''fill a square'''
self.height[gx][gy] = int(altitude)
def blocknum(self):
'''find IO block number'''
stride = east_blocks(self.lat_degrees*1e7, self.lon_degrees*1e7)
return stride * self.grid_idx_x + self.grid_idx_y
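    # File mapping note (illustrative): blocks are laid out with grid_idx_y
    # varying fastest, so a block's byte offset in the .DAT file is
    #   blocknum() * IO_BLOCK_SIZE == (stride * grid_idx_x + grid_idx_y) * 2048
    # pos_from_file_offset() above inverts exactly this mapping when scanning an
    # existing file.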
class DataFile(object):
def __init__(self, lat, lon):
if lat < 0:
NS = 'S'
else:
NS = 'N'
if lon < 0:
EW = 'W'
else:
EW = 'E'
name = "terrain/%c%02u%c%03u.DAT" % (NS, min(abs(int(lat)), 99),
EW, min(abs(int(lon)), 999))
try:
os.mkdir("terrain")
except Exception:
pass
if not os.path.exists(name):
self.fh = open(name, 'w+b')
else:
self.fh = open(name, 'r+b')
def seek_offset(self, block):
'''seek to right offset'''
# work out how many longitude blocks there are at this latitude
file_offset = block.blocknum() * IO_BLOCK_SIZE
self.fh.seek(file_offset)
def pack(self, block):
'''pack into a block'''
buf = bytes()
buf += struct.pack("<QiiHHH", block.bitmap, block.lat, block.lon, block.crc, block.version, block.spacing)
for gx in range(TERRAIN_GRID_BLOCK_SIZE_X):
buf += struct.pack("<%uh" % TERRAIN_GRID_BLOCK_SIZE_Y, *block.height[gx])
buf += struct.pack("<HHhb", block.grid_idx_x, block.grid_idx_y, block.lon_degrees, block.lat_degrees)
return buf
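    # Layout note (illustrative, derived from pack()/check_filled()):
    #   header  : <QiiHHH = 8+4+4+2+2+2      = 22 bytes
    #   heights : 28 * 32 * int16            = 1792 bytes
    #   trailer : <HHhb   = 2+2+2+1          = 7 bytes
    #   total packed                         = 1821 bytes
    # The CRC16 in check_filled() is therefore computed over buf[:1821]; the
    # rest of each 2048-byte IO_BLOCK_SIZE block is unused padding.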
def write(self, block):
'''write a grid block'''
self.seek_offset(block)
block.crc = 0
buf = self.pack(block)
block.crc = crc16.crc16xmodem(buf)
buf = self.pack(block)
self.fh.write(buf)
def check_filled(self, block):
'''read a grid block and check if already filled'''
self.seek_offset(block)
buf = self.fh.read(IO_BLOCK_SIZE)
if len(buf) != IO_BLOCK_SIZE:
return False
(bitmap, lat, lon, crc, version, spacing) = struct.unpack("<QiiHHH", buf[:22])
if (version != TERRAIN_GRID_FORMAT_VERSION or
abs(lat - block.lat)>2 or
abs(lon - block.lon)>2 or
spacing != GRID_SPACING or
bitmap != (1<<56)-1):
return False
buf = buf[:16] + struct.pack("<H", 0) + buf[18:]
crc2 = crc16.crc16xmodem(buf[:1821])
if crc2 != crc:
return False
return True
def create_degree(lat, lon):
'''create data file for one degree lat/lon'''
lat_int = int(math.floor(lat))
    lon_int = int(math.floor(lon))
tiles = {}
dfile = DataFile(lat_int, lon_int)
print("Creating for %d %d" % (lat_int, lon_int))
total_blocks = east_blocks(lat_int*1e7, lon_int*1e7) * 47
for blocknum in range(total_blocks):
(lat_e7, lon_e7) = pos_from_file_offset(lat_int, lon_int, blocknum * IO_BLOCK_SIZE)
lat = lat_e7 * 1.0e-7
lon = lon_e7 * 1.0e-7
grid = GridBlock(lat_int, lon_int, lat, lon)
if grid.blocknum() != blocknum:
continue
if not args.force and dfile.check_filled(grid):
continue
for gx in range(TERRAIN_GRID_BLOCK_SIZE_X):
for gy in range(TERRAIN_GRID_BLOCK_SIZE_Y):
lat_e7, lon_e7 = add_offset(lat*1.0e7, lon*1.0e7, gx*GRID_SPACING, gy*GRID_SPACING)
lat2_int = int(math.floor(lat_e7*1.0e-7))
lon2_int = int(math.floor(lon_e7*1.0e-7))
tile_idx = (lat2_int, lon2_int)
                waited = False
                while tile_idx not in tiles:
                    tile = downloader.getTile(lat2_int, lon2_int)
if tile == 0:
print("waiting on download of %d,%d" % (lat2_int, lon2_int))
time.sleep(0.3)
waited = True
continue
if waited:
print("downloaded %d,%d" % (lat2_int, lon2_int))
tiles[tile_idx] = tile
altitude = tiles[tile_idx].getAltitudeFromLatLon(lat_e7*1.0e-7, lon_e7*1.0e-7)
grid.fill(gx, gy, altitude)
dfile.write(grid)
from argparse import ArgumentParser
parser = ArgumentParser(description='terrain data creator')
parser.add_argument("lat", type=float, default=-35.363261)
parser.add_argument("lon", type=float, default=149.165230)
parser.add_argument("--force", action='store_true', help="overwrite existing full blocks")
parser.add_argument("--radius", type=int, default=100, help="radius in km")
parser.add_argument("--debug", action='store_true', default=False)
parser.add_argument("--spacing", type=int, default=100, help="grid spacing in meters")
args = parser.parse_args()
downloader = srtm.SRTMDownloader(debug=args.debug)
downloader.loadFileList()
GRID_SPACING = args.spacing
done = set()
for dx in range(-args.radius, args.radius):
for dy in range(-args.radius, args.radius):
(lat2,lon2) = add_offset(args.lat*1e7, args.lon*1e7, dx*1000.0, dy*1000.0)
lat_int = int(round(lat2 * 1.0e-7))
lon_int = int(round(lon2 * 1.0e-7))
tag = (lat_int, lon_int)
if tag in done:
continue
done.add(tag)
create_degree(lat_int, lon_int)
create_degree(args.lat, args.lon)
| gpl-3.0 | 3,621,082,822,801,622,500 | 36.749164 | 124 | 0.602552 | false |
sxslex/rows | rows/utils.py | 1 | 4354 | # coding: utf-8
# Copyright 2014-2015 Álvaro Justen <https://github.com/turicas/rows/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import os
import tempfile
from collections import Iterator
from unicodedata import normalize
import requests
import rows
# TODO: create functions to serialize/deserialize data
SLUG_CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_'
def slug(text, encoding=None, separator='_', permitted_chars=SLUG_CHARS,
replace_with_separator=' -_'):
if isinstance(text, str):
text = text.decode(encoding or 'ascii')
clean_text = text.strip()
for char in replace_with_separator:
clean_text = clean_text.replace(char, separator)
double_separator = separator + separator
while double_separator in clean_text:
clean_text = clean_text.replace(double_separator, separator)
ascii_text = normalize('NFKD', clean_text).encode('ascii', 'ignore')
strict_text = [x for x in ascii_text if x in permitted_chars]
text = ''.join(strict_text).lower()
if text.startswith(separator):
text = text[len(separator):]
if text.endswith(separator):
text = text[:-len(separator)]
return text
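# Examples (added for illustration; not part of the original module):
#   slug('Álvaro Justen')      -> 'alvaro_justen'
#   slug(' First -- Second ')  -> 'first_second'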
def ipartition(iterable, partition_size):
if not isinstance(iterable, Iterator):
iterator = iter(iterable)
else:
iterator = iterable
finished = False
while not finished:
data = []
for _ in range(partition_size):
try:
data.append(iterator.next())
except StopIteration:
finished = True
break
yield data
def download_file(uri):
response = requests.get(uri)
content = response.content
# TODO: try to guess with uri.split('/')[-1].split('.')[-1].lower()
try:
content_type = response.headers['content-type']
plugin_name = content_type.split('/')[-1]
except (KeyError, IndexError):
try:
plugin_name = uri.split('/')[-1].split('.')[-1].lower()
except IndexError:
raise RuntimeError('Could not identify file type.')
tmp = tempfile.NamedTemporaryFile()
filename = '{}.{}'.format(tmp.name, plugin_name)
tmp.close()
with open(filename, 'wb') as fobj:
fobj.write(content)
return filename
def get_uri_information(uri):
if uri.startswith('http://') or uri.startswith('https://'):
should_delete = True
filename = download_file(uri)
else:
should_delete = False
filename = uri
plugin_name = filename.split('.')[-1].lower()
if plugin_name == 'htm':
plugin_name = 'html'
elif plugin_name == 'text':
plugin_name = 'txt'
elif plugin_name == 'json':
plugin_name = 'pjson'
return should_delete, filename, plugin_name
def import_from_uri(uri, *args, **kwargs):
# TODO: support '-' also
should_delete, filename, plugin_name = get_uri_information(uri)
try:
import_function = getattr(rows, 'import_from_{}'.format(plugin_name))
except AttributeError:
raise ValueError('Plugin (import) "{}" not found'.format(plugin_name))
with open(filename) as fobj:
table = import_function(fobj, *args, **kwargs)
if should_delete:
os.unlink(filename)
return table
def export_to_uri(uri, table, *args, **kwargs):
# TODO: support '-' also
plugin_name = uri.split('.')[-1].lower()
try:
export_function = getattr(rows, 'export_to_{}'.format(plugin_name))
except AttributeError:
raise ValueError('Plugin (export) "{}" not found'.format(plugin_name))
export_function(table, uri, *args, **kwargs)
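# Example round trip (illustrative; assumes the csv and html plugins are
# available in this rows installation):
#   table = import_from_uri('http://example.com/data.csv')
#   export_to_uri('/tmp/data.html', table)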
| gpl-3.0 | -4,946,662,462,204,558,000 | 29.229167 | 78 | 0.643924 | false |
bpwook/auto-cert-kit | autocertkit/testbase.py | 1 | 16772 | # Copyright (c) Citrix Systems Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""Module for base test clasess from which test cases are derived"""
import traceback
import re
import signal
from utils import *
log = get_logger('auto-cert-kit')
class TestClass(object):
"""The base test class for defining attributes
and methods that all other test classes must have
or override"""
config = {}
collects = []
static_managers = {}
tags = []
caps = [REQ_CAP]
order = 1
required = True
required_config = []
session = None
base_tag = "Base"
XS_REQ = ">= 6.0"
XCP_REQ = ">= 1.0"
REQUIRED_FOR = None
def __init__(self, session, config):
"""The constructor method.
We expect to be passed a dictionary object containing
global config used by each test case"""
self.config = config
self.session = session
#Take a copy of the tag list and then append.
self.tags = list(self.tags)
self.tags.append(self.base_tag)
self.extra_init()
def get_tags(self):
return self.tags
def extra_init(self):
"""Can be overriden by subclasses to perform
extra initialisation"""
# Make sure we only run this on test run.
if 'device_config' in self.config.keys():
self.generate_static_net_conf()
def host_setup(self):
"""Method for running setup commands on a host
before executing the tests. This may include
operations that require a reboot. The test runner
will handle re-executing the current test case
when booting has finished"""
return
def run(self, debug=False, test_name=None):
"""Method for running all the tests in a class"""
self.check_prerequisites()
self.host_setup()
results = []
tests = self.list_tests()
if test_name:
arr = test_name.split('.')
test_name = arr[len(arr)-1]
log.debug("Test Selected = %s" % test_name)
for test in tests:
if test_name and test_name != test:
continue
# This assumes that we do not keep IPs across individual tests
for vlan, sm in self.static_managers.iteritems():
sm.release_all()
# Release Alarm signal to prevent handled signal from previous test
# interrupts this test. When there is no SIG_ALRM, this does nothing.
signal.alarm(0)
rec = {}
try:
log.debug("******** %s.%s ********" % (self.__class__.__name__, test))
res = getattr(self, test)(self.session)
# If test executed without failure it can be either skipped or passed.
if 'skipped' in res and res['skipped']:
rec['result'] = 'skip'
else:
rec['result'] = 'pass'
def copy_field(rec, res, field, keep_tag = True):
if field in res:
rec[field] = res[field]
elif keep_tag:
rec[field] = ""
copy_field(rec, res, 'info')
copy_field(rec, res, 'data')
copy_field(rec, res, 'config')
copy_field(rec, res, 'reason', False)
copy_field(rec, res, 'warning', False)
except Exception, e:
traceb = traceback.format_exc()
rec['result'] = 'fail'
rec['traceback'] = traceb
rec['exception'] = str(e)
log.error("Test Case Failure: %s" % str(test))
log.debug(traceb)
if debug:
log.debug("Running in debug mode - exiting due to failure: %s" % str(e))
sys.exit(0)
except:
traceb = traceback.format_exc()
exception = True
rec['result'] = 'fail'
                rec['traceback'] = traceb
rec['exception'] = "Unexpected error: %s" % sys.exc_info()[0]
log.debug(traceb)
if debug:
log.debug("Running in debug mode - exiting due to failure: %s" % sys.exc_info()[0])
sys.exit(0)
log.debug("Test case %s: %s.%s" % (rec['result'], self.__class__.__name__, test))
rec['test_name'] = "%s.%s" % (self.__class__.__name__, test)
results.append(rec)
pool_wide_cleanup(self.session)
return results
def check_prerequisites(self):
"""Check that the class has met it's prerequisites
this is achieved by ensuring that for all 'required_config'
keys, an entry is found in the config dict object"""
for tag in self.required_config:
log.debug("Checking for %s" % tag)
if tag not in self.config or not self.config[tag]:
raise Exception("Prerequisite '%s' has not been passed to this object" % tag)
else:
log.debug("Tag %s: %s" % (tag, self.config[tag]))
xs_version = get_xenserver_version(self.session)
if eval_expr(self.XS_REQ, xs_version):
return
xcp_version = get_xcp_version(self.session)
if eval_expr(self.XCP_REQ, xcp_version):
return
raise Exception("versions do not meet requirements.")
def list_tests(self):
"""Return a list of tests contained within this class"""
method_list = [method for method in dir(self)
if callable(getattr(self,method))
and method.startswith('test')]
return method_list
def is_required(self):
"""Returns True by default, false if the test is optional"""
return self.required
def get_required_config(self):
"""Returns a list of parameters required for running
the test cases with this class"""
return self.required_config
def generate_static_net_conf(self):
log.debug("Config: %s" % self.config)
netconf = self.get_netconf()
log.debug("Netconf: %s" % netconf)
netid_rec = {}
for iface, rec in netconf.iteritems():
if iface.startswith('eth'):
log.debug("iface: %s Rec: %s" % (iface, rec))
nid = rec['network_id']
# Required for initialisation
if nid not in netid_rec:
netid_rec[nid] = []
# Append interface on that network id
netid_rec[nid].append(iface)
res = {}
regex = re.compile(r'static_(?P<netid>\d+)_(?P<vlan>\d+)')
# Iterate through the network config structure to
# see if we have any static managers to initialise.
for k, v in self.get_netconf().iteritems():
# We only care about vlans on the physical network ID this test is running on
match = regex.search(k)
if match:
network_id = int(match.group('netid'))
vlan = match.group('vlan')
log.debug("Static Config Record for Netid %d and Vlan %s" % \
(network_id, vlan))
sm = StaticIPManager(v)
# We must assign this static manager to all of the network references
# which have the netid that has been specified.
if network_id in netid_rec.keys():
for iface in netid_rec[network_id]:
log.debug("Create static config for %s (%s)" % (iface, vlan))
key_name = "%s_%s" % (iface, vlan)
assert(key_name not in res.keys())
res[key_name] = sm
log.debug("Added static conf for '%s'" % key_name)
self.static_managers = res
log.debug("Static Managers Created: %s" % self.static_managers)
def get_static_manager(self, network_ref, vlan='0'):
"""By default, return the zero'th VLAN static ip manager
if it exists, otherwise just return None."""
log.debug("get_static_manager: %s %s" % (network_ref, vlan))
log.debug("All static recs: %s" % self.static_managers)
devices = get_physical_devices_by_network(self.session, network_ref)
# Note: we expect two devices for the case where we have created
# a bond between two PIFs.
if len(devices) > 2:
raise Exception("Error: more than two devices " \
+ "for network %s: %s" % (network_ref, devices))
# In the case of a bond, we do not mind which device is used.
iface = devices.pop()
key = "%s_%s" % (iface, vlan)
if key in self.static_managers.keys():
return self.static_managers[key]
else:
return None
def get_vlans(self, iface):
""" For a specified ethernet interface, return the list of
VLANs that the user has declared to be in operation."""
netconf = eval(self.config['netconf'])
if iface not in netconf:
raise Exception("The interface %s has not been defined in the network config file. (%s)" %
(iface, netconf))
return netconf[iface]['vlan_ids']
def get_netconf(self):
"""Return the network config dictionary, as provided by the user"""
return eval(self.config['netconf'])
def singlenicmode(self):
return 'singlenic' in self.config.keys() and self.config['singlenic'] == 'true'
def get_equivalent_devices(self):
"""Return a list of interfaces presented by devices with the same PCI ID as
the one currently being tested by the testkit"""
equiv_ifaces = intersection(get_equivalent_devices(self.session,
self.config['device_config']),
self.get_netconf().keys())
log.debug("Equivalent devices for %s: %s" % (self.config['device_config']['Kernel_name'],
equiv_ifaces))
return equiv_ifaces
def get_pifs_to_use(self):
equiv_devs = self.get_equivalent_devices()
try:
return filter_pif_devices(self.session, equiv_devs)
except Exception, e:
log.error("Caught Exception - may be OK if running in single NIC mode.")
log.error("Exception Occurred: %s" % str(e))
if self.singlenicmode():
return equiv_devs
else:
raise e
def get_networks(self):
"""Take in a list of available devices to use for testing
and return a list of network references."""
device_list = self.get_equivalent_devices()
if self.singlenicmode():
devices = device_list
else:
#Get no management ethernet devices
devices = filter_pif_devices(self.session, device_list)
results = []
for device in devices:
#Array access exception would be raised by filter_pif_devices
pifs = get_pifs_by_device(self.session, device)
#Assumption that two PIFs are on the same network
network_ref = self.session.xenapi.PIF.get_network(pifs[0])
if len(pifs) > 1:
for pif in pifs[1:]:
if self.session.xenapi.PIF.get_network(pif) != network_ref:
raise Exception("Assumption that identical devices " +
"in a pool are attached to the same " +
"network is invalid!")
results.append(network_ref)
#Raise an exception if no networks have been found
if not len(results):
raise Exception("No non-management networks have been found")
return results
class NetworkTestClass(TestClass):
"""Sub class for Network Tests"""
base_tag = 'NA'
network_backend = 'vswitch'
num_ips_required = 0
def host_setup(self):
"""Overload setup function. Setup networking backend"""
master_ref = get_pool_master(self.session)
host_refs = self.session.xenapi.host.get_all()
for host_ref in host_refs:
oc = self.session.xenapi.host.get_other_config(host_ref)
default_routes_key = 'default_routes'
if default_routes_key not in oc.keys():
routes = get_network_routes(self.session, host_ref)
route_recs = [route.get_record() for route in routes]
oc[default_routes_key] = str(route_recs)
self.session.xenapi.host.set_other_config(host_ref, oc)
def plugin_call(method, args):
return self.session.xenapi.host.call_plugin(master_ref,
'autocertkit',
method,
args)
backend = plugin_call('get_network_backend', {})
log.debug("Current network backend: %s" % backend)
log.debug("self.network_backend %s" % self.network_backend)
if self.network_backend == 'vswitch' and backend == 'bridge':
#Switch backend to vswitch
plugin_call('set_network_backend_pool', {'backend': 'openvswitch'})
host_reboot(self.session)
elif self.network_backend == 'bridge' and backend == 'openvswitch':
#Switch backend to bridge
plugin_call('set_network_backend_pool', {'backend': 'bridge'})
host_reboot(self.session)
#Nothing to do, just return
return
def get_bondable_ifaces(self, iface):
""" Given a particular interface, return a list of other network
interfaces which have been defined as being on the same physical L2 network."""
netconf = self.get_netconf()
phy_id = netconf[iface]['network_id']
log.debug("NetConf: '%s'" % netconf)
# Construct a list of interface names who have the same physical ID
# as the provided interface.
blist = intersection([k for k, v in netconf.iteritems() if k.startswith('eth') and
v['network_id'] == phy_id],
netconf.keys())
        # Need to remove any occurrences of the given interface, as we can't bond
# with ourselves.
while blist.count(iface) != 0:
blist.remove(iface)
return blist
def get_primary_bond_iface(self):
"""For the device currently being tested, return all the interface which are equivalent,
and can be bonded"""
res = []
        # Only return interfaces which have more than one bondable interface
for iface in self.get_equivalent_devices():
if self.get_bondable_ifaces(iface):
res.append(iface)
return res
class LocalStorageTestClass(TestClass):
"""Sub class for storage tests"""
base_tag = 'LS'
class CPUTestClass(TestClass):
"""Sub class for CPU tests"""
base_tag = 'CPU'
class OperationsTestClass(TestClass):
"""Sub class for Operations tests"""
base_tag = 'OP'
| bsd-2-clause | 6,737,944,345,618,041,000 | 38.463529 | 103 | 0.563797 | false |
Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_02_01/operations/_queue_operations.py | 1 | 22466 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class QueueOperations(object):
"""QueueOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create(
self,
resource_group_name, # type: str
account_name, # type: str
queue_name, # type: str
queue, # type: "_models.StorageQueue"
**kwargs # type: Any
):
# type: (...) -> "_models.StorageQueue"
"""Creates a new queue with the specified queue name, under the specified account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param queue_name: A queue name must be unique within a storage account and must be between 3
and 63 characters.The name must comprise of lowercase alphanumeric and dash(-) characters only,
it should begin and end with an alphanumeric character and it cannot have two consecutive
dash(-) characters.
:type queue_name: str
:param queue: Queue properties and metadata to be created with.
:type queue: ~azure.mgmt.storage.v2021_02_01.models.StorageQueue
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageQueue, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.StorageQueue
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageQueue"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'queueName': self._serialize.url("queue_name", queue_name, 'str', max_length=63, min_length=3, pattern=r'^[a-z0-9]([a-z0-9]|(-(?!-))){1,61}[a-z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(queue, 'StorageQueue')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageQueue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}'} # type: ignore
def update(
self,
resource_group_name, # type: str
account_name, # type: str
queue_name, # type: str
queue, # type: "_models.StorageQueue"
**kwargs # type: Any
):
# type: (...) -> "_models.StorageQueue"
"""Creates a new queue with the specified queue name, under the specified account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param queue_name: A queue name must be unique within a storage account and must be between 3
and 63 characters.The name must comprise of lowercase alphanumeric and dash(-) characters only,
it should begin and end with an alphanumeric character and it cannot have two consecutive
dash(-) characters.
:type queue_name: str
:param queue: Queue properties and metadata to be created with.
:type queue: ~azure.mgmt.storage.v2021_02_01.models.StorageQueue
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageQueue, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.StorageQueue
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageQueue"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'queueName': self._serialize.url("queue_name", queue_name, 'str', max_length=63, min_length=3, pattern=r'^[a-z0-9]([a-z0-9]|(-(?!-))){1,61}[a-z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(queue, 'StorageQueue')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageQueue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
account_name, # type: str
queue_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.StorageQueue"
"""Gets the queue with the specified queue name, under the specified account if it exists.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param queue_name: A queue name must be unique within a storage account and must be between 3
and 63 characters.The name must comprise of lowercase alphanumeric and dash(-) characters only,
it should begin and end with an alphanumeric character and it cannot have two consecutive
dash(-) characters.
:type queue_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageQueue, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.StorageQueue
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageQueue"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'queueName': self._serialize.url("queue_name", queue_name, 'str', max_length=63, min_length=3, pattern=r'^[a-z0-9]([a-z0-9]|(-(?!-))){1,61}[a-z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageQueue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
account_name, # type: str
queue_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes the queue with the specified queue name, under the specified account if it exists.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param queue_name: A queue name must be unique within a storage account and must be between 3
and 63 characters.The name must comprise of lowercase alphanumeric and dash(-) characters only,
it should begin and end with an alphanumeric character and it cannot have two consecutive
dash(-) characters.
:type queue_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'queueName': self._serialize.url("queue_name", queue_name, 'str', max_length=63, min_length=3, pattern=r'^[a-z0-9]([a-z0-9]|(-(?!-))){1,61}[a-z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
account_name, # type: str
maxpagesize=None, # type: Optional[str]
filter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListQueueResource"]
"""Gets a list of all the queues under the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param maxpagesize: Optional, a maximum number of queues that should be included in a list
queue response.
:type maxpagesize: str
:param filter: Optional, When specified, only the queues with a name starting with the given
filter will be listed.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListQueueResource or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_02_01.models.ListQueueResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListQueueResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if maxpagesize is not None:
query_parameters['$maxpagesize'] = self._serialize.query("maxpagesize", maxpagesize, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListQueueResource', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues'} # type: ignore
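    # Typical usage (editor's note, illustrative only; per the class docstring
    # this operation group is obtained from a client rather than instantiated):
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.storage import StorageManagementClient
    #   client = StorageManagementClient(DefaultAzureCredential(), subscription_id)
    #   for queue in client.queue.list("my-rg", "mystorageacct"):
    #       print(queue.name)
    # The attribute name ("queue") and credential type are assumptions based on
    # the generated client conventions, not verified against this SDK version.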
| mit | 1,946,883,023,803,050,200 | 51.861176 | 214 | 0.645153 | false |
deseret-tech/litecoin-python | src/litecoinrpc/config.py | 1 | 2715 | # Copyright (c) 2010 Witchspace <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Utilities for reading litecoin configuration files.
"""
def read_config_file(filename):
"""
Read a simple ``'='``-delimited config file.
Raises :const:`IOError` if unable to open file, or :const:`ValueError`
    if a parse error occurs.
"""
f = open(filename)
try:
cfg = {}
for line in f:
line = line.strip()
if line and not line.startswith("#"):
try:
(key, value) = line.split('=', 1)
cfg[key] = value
except ValueError:
pass # Happens when line has no '=', ignore
finally:
f.close()
return cfg
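# Example litecoin.conf fragment this parser understands (illustrative):
#   rpcuser=litecoinrpc
#   rpcpassword=change-me
#   rpcport=9332
# Comment lines and lines without '=' are ignored, and read_config_file()
# would return {'rpcuser': 'litecoinrpc', 'rpcpassword': 'change-me',
# 'rpcport': '9332'} -- values are kept as strings.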
def read_default_config(filename=None):
"""
Read litecoin default configuration from the current user's home directory.
Arguments:
- `filename`: Path to a configuration file in a non-standard location (optional)
"""
if filename is None:
import os
import platform
home = os.getenv("HOME")
if not home:
raise IOError("Home directory not defined, don't know where to look for config file")
if platform.system() == "Darwin":
location = 'Library/Application Support/Litecoin/litecoin.conf'
else:
location = '.litecoin/litecoin.conf'
filename = os.path.join(home, location)
elif filename.startswith("~"):
import os
filename = os.path.expanduser(filename)
try:
return read_config_file(filename)
except (IOError, ValueError):
pass # Cannot read config file, ignore
| mit | 7,992,244,416,752,432,000 | 35.2 | 97 | 0.659669 | false |
goosechooser/cps2-zmq | cps2zmq/gather/Broker.py | 1 | 10126 | # pylint: disable=E1101
"""
Contains Broker, WorkerRepresentative, and ServiceQueue classes.
"""
import sys
import logging
import zmq
from zmq.eventloop.zmqstream import ZMQStream
from zmq.eventloop.ioloop import IOLoop, PeriodicCallback
from cps2zmq.gather import mdp, log
HB_INTERVAL = 1000
HB_LIVENESS = 3
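# Illustrative timing note: with HB_INTERVAL = 1000 ms and HB_LIVENESS = 3, a
# worker that misses roughly three consecutive heartbeats (about 3 seconds of
# silence) drops to zero liveness and is unregistered by Broker.beat().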
class Broker(object):
"""
    The Broker receives messages sent by an instance of MAME and passes them to
    workers for processing.
    Attributes:
        front_addr (str): the address the client-facing (frontend) ROUTER socket binds to.
        frontstream (:obj:`zmq.eventloop.zmqstream.ZMQStream`): wraps the frontend ROUTER \
            socket; MAME clients connect and send messages here.
        backstream (:obj:`zmq.eventloop.zmqstream.ZMQStream`): wraps the backend ROUTER \
            socket; routes work to the worker that requested it.
        workers (dict): maps worker ids to WorkerRepresentative instances.
        services (dict): maps service names to (ServiceQueue, waiting requests) pairs.
        heartbeater (:obj:`zmq.eventloop.ioloop.PeriodicCallback`): periodically culls dead workers.
        msgs_recv (int): Total number of messages received.
"""
WPROTOCOL = b'MDPW01'
msgs_recv = 0
def __init__(self, front_addr, toworkers, log_to_file=False):
loop = IOLoop.instance()
context = zmq.Context.instance()
self.front_addr = front_addr
front = context.socket(zmq.ROUTER)
front.setsockopt(zmq.LINGER, 0)
back = context.socket(zmq.ROUTER)
back.setsockopt(zmq.LINGER, 0)
self.frontstream = ZMQStream(front, loop)
self.frontstream.on_recv(self.handle_frontend)
self.frontstream.bind(front_addr)
self.backstream = ZMQStream(back, loop)
self.backstream.on_recv(self.handle_backend)
self.backstream.bind(toworkers)
self._logger = None
self.workers = {}
self.services = {}
self.heartbeater = None
self.setup_logging(log_to_file)
def setup(self):
"""
Sets up the heartbeater callback.
"""
self.heartbeater = PeriodicCallback(self.beat, HB_INTERVAL)
self.heartbeater.start()
def setup_logging(self, log_to_file):
name = self.__class__.__name__
self._logger = log.configure(name, fhandler=log_to_file)
def shutdown(self):
"""
Closes all associated zmq sockets and streams.
"""
self._logger.info('Closing\n')
if self.frontstream:
self.frontstream.socket.close()
self.frontstream.close()
self.frontstream = None
if self.backstream:
self.backstream.socket.close()
self.backstream.close()
self.backstream = None
if self.heartbeater:
self.heartbeater.stop()
self.heartbeater = None
self.workers = {}
self.services = {}
def start(self):
"""
Start the server
"""
self._logger.info('Starting at address %s', self.front_addr)
self.setup()
IOLoop.instance().start()
def report(self):
self._logger.info('Received %s messages', self.msgs_recv)
def beat(self):
"""
Checks for dead workers and removes them.
"""
for w in list(self.workers.values()):
if not w.is_alive():
self.unregister_worker(w.idn)
def register_worker(self, idn, service):
"""
Registers any worker who sends a READY message.
Allows the broker to keep track of heartbeats.
Args:
idn (bytes): the id of the worker.
service (byte-string): the service the work does work for.
"""
self._logger.info('Registering worker %s', idn)
if idn not in self.workers:
self.workers[idn] = WorkerRepresentative(self.WPROTOCOL, idn, service, self.backstream)
if service in self.services:
wq, wr = self.services[service]
wq.put(idn)
else:
self._logger.info('Adding %s to services', service)
q = ServiceQueue()
q.put(idn)
self.services[service] = (q, [])
def unregister_worker(self, idn):
"""
Unregisters a worker from the server.
Args:
idn (bytes): the id of the worker
"""
self._logger.info('Unregistering worker %s', idn)
self.workers[idn].shutdown()
service = self.workers[idn].service
if service in self.services:
wq, wr = self.services[service]
wq.remove(idn)
del self.workers[idn]
def disconnect_worker(self, idn, socket):
"""
Tells worker to disconnect from the server, then unregisters the worker.
Args:
idn (bytes): id of the worker
socket (zmq.socket): which socket to send the message out from
"""
try:
socket.send_multipart([idn, b'', self.WPROTOCOL, mdp.DISCONNECT])
except TypeError as err:
self._logger.error('Encountered error', exc_info=True)
self._logger.info('Disconnecting worker %s', idn)
self.unregister_worker(idn)
def handle_frontend(self, msg):
"""
Callback. Handles messages received from clients.
"""
client_addr = msg.pop(0)
empty = msg.pop(0)
protocol = msg.pop(0)
service = msg.pop(0)
service = service.decode('utf-8')
request = msg[0]
if service == 'disconnect':
# Need to determine how many packets are lost doing this.
self._logger.info('Received disconnect command. Server disconnecting workers')
for w in list(self.workers):
self.disconnect_worker(w, self.backstream.socket)
IOLoop.instance().stop()
else:
self.msgs_recv += 1
try:
wq, wr = self.services[service]
idn = wq.get()
if idn:
self.send_request(self.backstream, idn, client_addr, request)
else:
wr.append(request)
except KeyError:
self._logger.error('Encountered error with service %s', service, exc_info=True)
def handle_backend(self, msg):
"""
Callback. Handles messages received from workers.
"""
worker_idn = msg.pop(0)
empty = msg.pop(0)
protocol = msg.pop(0)
command = msg.pop(0)
if command == mdp.READY:
self.register_worker(worker_idn, msg.pop().decode('utf-8'))
elif command == mdp.REPLY:
client_addr, _, message = msg
service = self.workers[worker_idn].service
try:
wq, wr = self.services[service]
# send it wherever
wq.put(worker_idn)
if wr:
msg = wr.pop(0)
self.send_request(self.backstream, worker_idn, client_addr, msg)
except KeyError as err:
self._logger.error('Encountered error with service %s', service, exc_info=True)
elif command == mdp.HEARTBEAT:
worker = self.workers[worker_idn]
if worker.is_alive():
worker.recv_heartbeat()
elif command == mdp.DISCONNECT:
self.unregister_worker(worker_idn)
else:
self.disconnect_worker(worker_idn, self.backstream)
def send_request(self, socket, idn, client_addr, msg):
"""
Helper function. Formats and sends a request.
Args:
socket (zmq.socket): socket to send message out from
idn (bytes): id of worker to label message with
client_addr (bytes): addr of client requesting the work
msg (list): the message to be processed
"""
request_msg = [idn, b'', self.WPROTOCOL, mdp.REQUEST, client_addr, b'', msg]
socket.send_multipart(request_msg)
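    # Frame layout sent to a worker by send_request() (illustrative, mirrors the
    # list built above; modelled on the Majordomo (MDP) worker protocol):
    #   [worker_idn, b'', b'MDPW01', mdp.REQUEST, client_addr, b'', payload]
    # The worker strips the routing envelope and replies with
    #   [b'', b'MDPW01', mdp.REPLY, client_addr, b'', result]
    # which handle_backend() unpacks as (client_addr, _, message).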
class WorkerRepresentative(object):
"""
Represents a worker connected to the server.
Handles heartbeats between the server and a specific worker.
"""
def __init__(self, protocol, idn, service, stream):
self.protocol = protocol
self.idn = idn
self.service = service
self.current_liveness = HB_LIVENESS
self.stream = stream
self.last_heartbeat = 0
self.heartbeater = PeriodicCallback(self.heartbeat, HB_INTERVAL)
self.heartbeater.start()
def heartbeat(self):
"""
Callback. Periodically sends a heartbeat message to associated worker.
"""
self.current_liveness -= 1
self.stream.send_multipart([self.idn, b'', self.protocol, mdp.HEARTBEAT])
def recv_heartbeat(self):
"""
Refreshes current_liveness when a heartbeat message is received from associated worker.
"""
self.current_liveness = HB_LIVENESS
def is_alive(self):
"""
Helper function.
Returns:
False if current_liveness is under 0, True otherwise
"""
return self.current_liveness > 0
def shutdown(self):
"""
Cleans up!
"""
self.heartbeater.stop()
self.heartbeater = None
self.stream = None
class ServiceQueue(object):
"""
    It's a simple FIFO queue of worker ids.
"""
def __init__(self):
self.q = []
def __contains__(self, idn):
        return idn in self.q
def __len__(self):
return len(self.q)
def remove(self, idn):
"""
Removes from the queue.
"""
try:
self.q.remove(idn)
except ValueError:
pass
def put(self, idn):
"""
Put something in the queue.
"""
if idn not in self.q:
self.q.append(idn)
def get(self):
"""
Get something from the queue.
"""
if not self.q:
return None
return self.q.pop(0)
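
# The following demo is an added illustrative sketch, not part of the original
# broker: it only exercises the ServiceQueue container defined above, using
# made-up worker ids. It shows the FIFO behaviour the server relies on when
# picking the next idle worker for a service.
def _service_queue_demo():
    q = ServiceQueue()
    q.put(b'worker-1')
    q.put(b'worker-2')
    q.put(b'worker-1')        # duplicate ids are ignored by put()
    first = q.get()           # FIFO: b'worker-1' comes back first
    q.remove(b'worker-3')     # removing an unknown id is a harmless no-op
    return first, len(q)      # -> (b'worker-1', 1)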
| mit | 1,896,932,177,797,409,500 | 29.408408 | 99 | 0.569326 | false |
NicolasKiely/Ackermann | Ackermann.py | 1 | 3454 | ''' Evaluates Ackermann function
Adapted from here: http://www.eprg.org/computerphile/recursion.htm
Usage:
python Ackermann.py <brute|cache> <m> <n>
Where
<brute|cache> specifies whether to enable the cache
<m> is the first parameter of the Ackermann function
<n> is the second parameter of the Ackermann function
'''
import sys
class Ackermann(object):
''' Wrapper class for the ackerman function '''
def __init__(self, use_cache):
''' Initialize, setup cache if use_cache==True '''
# Number of function calls
self.call_count = 0
self.use_cache = use_cache
if use_cache:
# Cache of evaluated (m,n) => f(m,n) pairs
self.cache = {}
def evaluate(self, m, n):
''' Evaluates ackermann function recursively '''
# Increment call count
self.call_count += 1
if self.use_cache:
# Check cache
if (m, n) in self.cache:
return self.cache[(m, n)]
if m == 0:
results = n + 1
elif n == 0:
results = self.evaluate(m-1, 1)
else:
results = self.evaluate(m-1, self.evaluate(m, n-1))
if self.use_cache:
# Save to cache
self.cache[(m, n)] = results
return results
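
# Added illustrative sketch (not part of the original CLI): shows how the
# Ackermann class above can be driven directly from Python. The inputs (2, 3)
# are arbitrary small example values; Ackermann(2, 3) evaluates to 9.
def _example_usage():
    cached = Ackermann(use_cache=True)
    brute = Ackermann(use_cache=False)
    value = cached.evaluate(2, 3)
    assert value == brute.evaluate(2, 3) == 9
    # The memoized instance needs far fewer recursive calls for the same input.
    return cached.call_count, brute.call_count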
def print_usage():
print 'Program Usage:'
print '\tpython %s <brute|cache> <m> <n>' % sys.argv[0]
print 'Where:'
print '\t<brute|cache> specifies whether to enable the cache'
print '\t<m> is the first parameter of the Ackermann function'
print '\t<n> is the second parameter of the Ackermann function'
# Acceptable arguments for setting cache
acceptable_nocache_args = ('brute', 'no', 'n')
acceptable_yescache_args = ('cache', 'yes', 'y')
# Message shown when bad ackermann argument passed
bad_number_msg = 'Error, expected positive integer %s argument, got "%s"'
# main()
if __name__ == '__main__':
# Check number of arguments
if len(sys.argv) != 4:
print_usage()
exit()
# Check cache argument
par_cache = sys.argv[1].lower()
if par_cache in acceptable_nocache_args:
use_cache = False
elif par_cache in acceptable_yescache_args:
use_cache = True
else:
# Could not parse first argument
print 'Error, could not understand cache arg %s'
print 'To use the cache, valid strings are: '
print '\t' + ', '.join(acceptable_yescache_args)
print 'To not use the cache, valid strings are: '
print '\t' + ', '.join(acceptable_nocache_args)
print
print_usage()
exit()
# Check m and arguments
ack_pars = [0, 0]
for i, name in enumerate(('<m>', '<n>')):
try:
# Cast parameter to integer
par = sys.argv[2+i]
ack_pars[i] = int(par)
# Make sure parameter is positive
if ack_pars[i] < 0:
raise ValueError
except ValueError:
# Handle casting error
print bad_number_msg % (name, par)
print
print_usage()
exit()
# Argument parsing done, now setup ackermann function and evaluate
ack = Ackermann(use_cache)
results = ack.evaluate(*ack_pars)
# Show results
print 'Ackermann(%d, %d) is: %d' % (ack_pars[0], ack_pars[1], results)
print 'Number of calls: %d' % ack.call_count
| mit | 879,025,367,012,459,400 | 27.081301 | 74 | 0.571801 | false |
leliel12/scikit-criteria | skcriteria/tests/madm/test__dmaker.py | 1 | 3411 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Cabral, Juan; Luczywo, Nadia
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
# DOC
# =============================================================================
"""test electre methods"""
# =============================================================================
# IMPORTS
# =============================================================================
import random
import string
from ...madm import _dmaker
from ..tcore import SKCriteriaTestCase
# =============================================================================
# BASE CLASS
# =============================================================================
class ExtraTest(SKCriteriaTestCase):
def setUp(self):
self.data = {}
for idx in range(random.randint(10, 100)):
key = "".join([
random.choice(string.ascii_letters)
for _ in range(random.randint(10, 30))])
value = "".join([
random.choice(string.ascii_letters)
for _ in range(random.randint(10, 30))])
self.data[key + str(idx)] = value
self.e = _dmaker.Extra(self.data)
def test_eq(self):
self.assertTrue(self.e == _dmaker.Extra(self.data))
def test_ne(self):
e = self.e
self.setUp()
self.assertTrue(self.e != e)
def test_getitem(self):
for k, v in self.data.items():
self.assertEqual(self.e[k], v)
def test_iter(self):
for k in self.e:
self.assertIn(k, self.data)
def test_len(self):
self.assertEqual(len(self.data), len(self.e))
def test_getattr(self):
for k, v in self.data.items():
self.assertEqual(getattr(self.e, k), v)
def test_str(self):
str(self.e)
def test_repr(self):
repr(self.e)
| bsd-3-clause | 5,228,993,042,200,737,000 | 33.806122 | 79 | 0.583113 | false |
alberthdev/nclayer | nc_diag_attr/nc_diag_attr.py | 1 | 9821 | # nc_diag_attr
from netCDF4 import Dataset, getlibversion
import netCDF4
import argparse
import sys
import traceback
import numpy
try:
import ujson as json
except:
import json
# Version information
__version__ = "0.9b"
VERSION_STR = 'nc_diag_attr v' + __version__ + "\n\n" + \
"Using the following library/runtime versions:\n" + \
(" netcdf4-python v%s\n" % netCDF4.__version__) + \
(" NetCDF v%s\n" % getlibversion()) + \
(" HDF5 v%s\n" % netCDF4.__hdf5libversion__) + \
(" Python v%s\n" % sys.version.split("\n")[0].strip())
# CLI Arguments
global args
def parse_cli_args():
global args
parser = argparse.ArgumentParser( #prog='ipush',
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Tool to add/modify global and variable attributes for NetCDF files",
version = VERSION_STR)
disable_group = parser.add_mutually_exclusive_group()
parser.add_argument("-V", "--verbose",
dest="verbose", action="store_true", default=False,
help = "enable verbose output")
parser.add_argument("-p", "--pretty",
dest="pretty_output", action="store_true", default=False,
help = "enable colorful, pretty output - don't enable if logging")
disable_group.add_argument("-ng", "--no-global",
dest="global_attributes", action="store_false", default=True,
help = "disable global attribute adding/modifying")
disable_group.add_argument("-nv", "--no-var",
dest="var_attributes", action="store_false", default=True,
help = "disable variable attribute adding/modifying")
parser.add_argument("-rc", metavar = "RESOURCE_FILE", dest="resource_file",
help = "input JSON resource file name with attributes to write", required = True)
parser.add_argument("nc4_files", help = "NetCDF4 files to apply attributes to", nargs="+")
args = parser.parse_args()
def error_msg(msg):
global args
if args.pretty_output:
print("\033[31m ** ERROR: %s\033[0m" % msg)
else:
print(" ** ERROR: %s" % msg)
def warning_msg(msg):
global args
if args.verbose:
if args.pretty_output:
print("\033[33m ** WARNING: %s\033[0m" % msg)
else:
print(" ** WARNING: %s" % msg)
def info_msg(msg):
global args
if args.verbose:
if args.pretty_output:
print("\033[34m ** INFO: %s\033[0m" % msg)
else:
print(" ** INFO: %s" % msg)
global current_line
current_line = ""
# ANSI line updater - if enabled!
def line_msg(msg):
global args, current_line
if args.pretty_output:
# Move cursor to beginning:
sys.stdout.write("\r")
# Erase the current line
sys.stdout.write(len(current_line) * " ")
# Backspace back to the beginning (we could use \r here...)
sys.stdout.write(len(current_line) * "\b")
# Print new message
sys.stdout.write(msg)
# Go back to beginning
sys.stdout.write(len(msg) * "\b")
# Flush output - if not flushed, output may not show up
sys.stdout.flush()
# Set new current line
current_line = msg
else:
print(msg)
def line_msg_done():
global args, current_line
if args.verbose and args.pretty_output:
# Move down from current line and erase current line buffer
sys.stdout.write("\n")
sys.stdout.flush()
current_line = ""
global entry_num, entry_total, entry_str
def init_counter(total_ele, entry):
global entry_num, entry_total, entry_str
if args.verbose:
entry_num = 0
entry_total = total_ele
entry_str = entry
def progress_counter(filename):
global entry_num, entry_total, entry_str
if args.verbose:
entry_num += 1
line_msg("%s %i/%i: %s" % (entry_str, entry_num, entry_total, filename))
def main():
# Parse arguments
parse_cli_args()
# Sanity checks
# Check to make sure that the JSON resource file exists!
try:
resource_file_fh = open(args.resource_file, "r")
except IOError:
error_msg("Resource file '%s' is not accessible or does not exist!" % args.resource_file)
exit(1)
# Check to make sure that the JSON resource file is valid!
try:
resource_data = json.loads(resource_file_fh.read())
except KeyboardInterrupt:
info_msg("CTRL-C detected, exiting.")
exit(0)
except:
error_msg("Resource file '%s' is not a valid JSON file!" % args.resource_file)
print(traceback.format_exc())
exit(1)
# Close file - we got the data already!
resource_file_fh.close()
# Print verbose version information
if args.verbose:
info_msg("Using following versions:")
info_msg(" netcdf4-python v%s" % netCDF4.__version__)
info_msg(" NetCDF v%s" % getlibversion())
info_msg(" HDF5 v%s" % netCDF4.__hdf5libversion__)
info_msg(" Python v%s\n" % sys.version.split("\n")[0].strip())
info_msg("Reading and validating NetCDF4 files...")
# Check to make sure the NetCDF4 files are legitimate!
nc4_files_root = []
init_counter(len(args.nc4_files), "Reading/verifying file")
for nc4_file in args.nc4_files:
try:
open(nc4_file, "r").close()
except KeyboardInterrupt:
info_msg("CTRL-C detected, exiting.")
exit(0)
except IOError:
error_msg("The NetCDF4 file '%s' does not exist!" % nc4_file)
exit(1)
progress_counter(nc4_file)
try:
rootgrp = Dataset(nc4_file, "a", format="NETCDF4")
nc4_files_root.append({ "file" : nc4_file, "group" : rootgrp })
except KeyboardInterrupt:
info_msg("CTRL-C detected, exiting.")
exit(0)
except:
error_msg("'%s' is not a valid NetCDF4 file!" % nc4_file)
exit(1)
line_msg_done()
# Global attributes
if args.global_attributes:
# Check if we have a global attributes entry in the resource file
if not "global_attributes" in resource_data:
warning_msg("Resource file '%s' does not have any global attributes, skipping." % args.resource_file)
else:
# Initialize our counter
init_counter(len(nc4_files_root), "Applying global attributes to file")
for nc4_entry in nc4_files_root:
# Update progress counter
progress_counter(nc4_entry["file"])
for global_attr_key in resource_data["global_attributes"]:
global_attr_val = resource_data["global_attributes"][global_attr_key]
# We need to convert unicode to ASCII
if type(global_attr_val) == unicode:
global_attr_val = str(global_attr_val)
# BUG fix - NetCDF really, really, REALLY does not like
# 64-bit integers. We forcefully convert the value to a
# 32-bit signed integer, with some help from numpy!
if type(global_attr_val) == int:
global_attr_val = numpy.int32(global_attr_val)
setattr(nc4_entry["group"], global_attr_key, global_attr_val)
line_msg_done()
# Variable attributes
if args.var_attributes:
# Check if we have a variable attributes entry in the resource file
if not "variable_attributes" in resource_data:
warning_msg("Resource file '%s' does not have any variable attributes, skipping." % args.resource_file)
else:
# Initialize our counter
init_counter(len(nc4_files_root), "Applying variable attributes to file")
for nc4_entry in nc4_files_root:
# Update progress counter
progress_counter(nc4_entry["file"])
# Iterate through all of our var_attr variables
for var in resource_data["variable_attributes"]:
if var in nc4_entry["group"].variables.keys():
for var_attr_key in resource_data["variable_attributes"][var]:
var_attr_val = resource_data["variable_attributes"][var][var_attr_key]
var_attr_key = str(var_attr_key)
# We need to convert unicode to ASCII
if type(var_attr_val) == unicode:
var_attr_val = list(str(var_attr_val))
# BUG fix - NetCDF really, really, REALLY does not like
# 64-bit integers. We forcefully convert the value to a
# 32-bit signed integer, with some help from numpy!
if type(var_attr_val) == int:
var_attr_val = numpy.int32(var_attr_val)
setattr(nc4_entry["group"].variables[var], var_attr_key, var_attr_val)
else:
warning_msg("Can't find variable %s in file %s!" % (var, nc4_entry["file"]))
line_msg_done()
# Close everything
init_counter(len(nc4_files_root), "Saving changes to file")
for nc4_entry in nc4_files_root:
progress_counter(nc4_entry["file"])
nc4_entry["group"].close()
line_msg_done()
info_msg("Attribute appending complete!")
if __name__ == "__main__":
main()
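
# Added note (illustrative only): a minimal example of the JSON resource file
# this tool consumes. The two top-level keys, "global_attributes" and
# "variable_attributes", are the ones read above; the attribute names and
# values shown here are hypothetical.
#
# {
#     "global_attributes": {
#         "institution": "Example Center",
#         "history": "attributes written by nc_diag_attr"
#     },
#     "variable_attributes": {
#         "temperature": {
#             "units": "K",
#             "long_name": "air temperature"
#         }
#     }
# }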
| apache-2.0 | 8,458,928,210,845,927,000 | 36.060377 | 115 | 0.562875 | false |
BitcoinUnlimited/BitcoinUnlimited | qa/rpc-tests/excessive.py | 1 | 14205 | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Unlimited developers
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import test_framework.loginit
# Test emergent consensus scenarios
import time
import random
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
from test_framework.util import *
from test_framework.blocktools import *
import test_framework.script as script
import pdb
import sys
if sys.version_info[0] < 3:
raise "Use Python 3"
import logging
def mostly_sync_mempools(rpc_connections, difference=50, wait=1, verbose=1):
"""
    Wait until every node has most of the same transactions in its memory
pools. There is no guarantee that mempools will ever sync due to the
filterInventoryKnown bloom filter.
"""
iterations = 0
while True:
iterations += 1
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
poolLen = [len(pool)]
for i in range(1, len(rpc_connections)):
tmp = set(rpc_connections[i].getrawmempool())
if tmp == pool:
num_match = num_match + 1
if iterations > 10 and len(tmp.symmetric_difference(pool)) < difference:
num_match = num_match + 1
poolLen.append(len(tmp))
if verbose:
logging.info("sync mempool: " + str(poolLen))
if num_match == len(rpc_connections):
break
time.sleep(wait)
class ExcessiveBlockTest (BitcoinTestFramework):
def __init__(self, extended=False):
self.extended = extended
BitcoinTestFramework.__init__(self)
def setup_network(self, split=False):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug=net", "-debug=graphene", "-usecashaddr=0", "-rpcservertimeout=0"], timewait=60 * 10))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug=net", "-debug=graphene", "-usecashaddr=0", "-rpcservertimeout=0"], timewait=60 * 10))
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug=net", "-debug=graphene", "-usecashaddr=0", "-rpcservertimeout=0"], timewait=60 * 10))
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug=net", "-debug=graphene", "-usecashaddr=0", "-rpcservertimeout=0"], timewait=60 * 10))
interconnect_nodes(self.nodes)
self.is_network_split = False
self.sync_all()
if 0: # getnewaddress can be painfully slow. This bit of code can be used to during development to
# create a wallet with lots of addresses, which then can be used in subsequent runs of the test.
# It is left here for developers to manually enable.
TEST_SIZE = 100 # TMP 00
print("Creating addresses...")
self.nodes[0].keypoolrefill(TEST_SIZE + 1)
addrs = [self.nodes[0].getnewaddress() for _ in range(TEST_SIZE + 1)]
with open("walletAddrs.json", "w") as f:
f.write(str(addrs))
pdb.set_trace()
def run_test(self):
BitcoinTestFramework.run_test(self)
self.testCli()
# clear out the mempool
for n in self.nodes:
while len(n.getrawmempool()):
n.generate(1)
sync_blocks(self.nodes)
logging.info("cleared mempool: %s" % str([len(x) for x in [y.getrawmempool() for y in self.nodes]]))
self.testExcessiveBlockSize()
def testCli(self):
# Assumes the default excessive at 32MB and mining at 8MB
try:
self.nodes[0].setminingmaxblock(33000000)
except JSONRPCException as e:
pass
else:
assert(0) # was able to set the mining size > the excessive size
try:
self.nodes[0].setminingmaxblock(99)
except JSONRPCException as e:
pass
else:
assert(0) # was able to set the mining size below our arbitrary minimum
try:
self.nodes[0].setexcessiveblock(1000, 10)
except JSONRPCException as e:
pass
else:
assert(0) # was able to set the excessive size < the mining size
def sync_all(self):
"""Synchronizes blocks and mempools (mempools may never fully sync)"""
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
mostly_sync_mempools(self.nodes[:2])
mostly_sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
mostly_sync_mempools(self.nodes)
def expectHeights(self, blockHeights, waittime=10):
loop = 0
count = []
while loop < waittime:
counts = [x.getblockcount() for x in self.nodes]
if counts == blockHeights:
return True # success!
else:
                for (a, b) in zip(counts, blockHeights):
                    if a > b:
                        assert False, "blockchain synced too far"
time.sleep(.25)
loop += .25
if int(loop) == loop and (int(loop) % 10) == 0:
logging.info("...waiting %f %s != %s" % (loop, counts, blockHeights))
return False
def repeatTx(self, count, node, addr, amt=1.0):
for i in range(0, count):
node.sendtoaddress(addr, amt)
def generateAndPrintBlock(self, node):
hsh = node.generate(1)
inf = node.getblock(hsh[0])
logging.info("block %d size %d" % (inf["height"], inf["size"]))
return hsh
def testExcessiveBlockSize(self):
# get spendable coins
if 0:
for n in self.nodes:
n.generate(1)
self.sync_all()
self.nodes[0].generate(100)
# Set the accept depth at 1, 2, and 3 and watch each nodes resist the chain for that long
self.nodes[0].setminingmaxblock(5000) # keep the generated blocks within 16*the EB so no disconnects
self.nodes[1].setminingmaxblock(1000)
self.nodes[2].setminingmaxblock(1000)
self.nodes[3].setminingmaxblock(1000)
self.nodes[1].setexcessiveblock(1000, 1)
self.nodes[2].setexcessiveblock(1000, 2)
self.nodes[3].setexcessiveblock(1000, 3)
logging.info("Test excessively sized block, not propagating until accept depth is exceeded")
addr = self.nodes[3].getnewaddress()
# By using a very small value, it is likely that a single input is used. This is important because
# our mined block size is so small in this test that if multiple inputs are used the transactions
# might not fit in the block. This will give us a short block when the test expects a larger one.
# To catch any of these short-block test malfunctions, the block size is printed out.
self.repeatTx(8, self.nodes[0], addr, .001)
counts = [x.getblockcount() for x in self.nodes]
base = counts[0]
logging.info("Starting counts: %s" % str(counts))
logging.info("node0")
self.generateAndPrintBlock(self.nodes[0])
assert_equal(True, self.expectHeights([base + 1, base, base, base]))
logging.info("node1")
self.nodes[0].generate(1)
assert_equal(True, self.expectHeights([base + 2, base + 2, base, base]))
logging.info("node2")
self.nodes[0].generate(1)
assert_equal(True, self.expectHeights([base + 3, base + 3, base + 3, base]))
logging.info("node3")
self.nodes[0].generate(1)
assert_equal(True, self.expectHeights([base + 4] * 4))
# Now generate another excessive block, but all nodes should snap right to
# it because they have an older excessive block
logging.info("Test immediate propagation of additional excessively sized block, due to prior excessive")
self.repeatTx(8, self.nodes[0], addr, .001)
self.nodes[0].generate(1)
assert_equal(True, self.expectHeights([base + 5] * 4))
logging.info("Test daily excessive reset")
# Now generate a day's worth of small blocks which should re-enable the
# node's reluctance to accept a large block
self.nodes[0].generate(6 * 24)
sync_blocks(self.nodes)
self.nodes[0].generate(5) # plus the accept depths
sync_blocks(self.nodes)
self.repeatTx(8, self.nodes[0], addr, .001)
base = self.nodes[0].getblockcount()
self.generateAndPrintBlock(self.nodes[0])
time.sleep(2) # give blocks a chance to fully propagate
counts = [x.getblockcount() for x in self.nodes]
assert_equal(counts, [base + 1, base, base, base])
self.repeatTx(8, self.nodes[0], addr, .001)
self.generateAndPrintBlock(self.nodes[0])
time.sleep(2) # give blocks a chance to fully propagate
sync_blocks(self.nodes[0:2])
counts = [x.getblockcount() for x in self.nodes]
assert_equal(counts, [base + 2, base + 2, base, base])
self.repeatTx(5, self.nodes[0], addr, .001)
self.generateAndPrintBlock(self.nodes[0])
time.sleep(2) # give blocks a chance to fully propagate
sync_blocks(self.nodes[0:3])
counts = [x.getblockcount() for x in self.nodes]
assert_equal(counts, [base + 3, base + 3, base + 3, base])
self.repeatTx(5, self.nodes[0], addr, .001)
self.generateAndPrintBlock(self.nodes[0])
sync_blocks(self.nodes)
counts = [x.getblockcount() for x in self.nodes]
assert_equal(counts, [base + 4] * 4)
self.repeatTx(5, self.nodes[0], addr, .001)
self.generateAndPrintBlock(self.nodes[0])
sync_blocks(self.nodes)
counts = [x.getblockcount() for x in self.nodes]
assert_equal(counts, [base + 5] * 4)
if self.extended:
logging.info("Test daily excessive reset #2")
# Now generate a day's worth of small blocks which should re-enable the
# node's reluctance to accept a large block + 10 because we have to get
# beyond all the node's accept depths
self.nodes[0].generate(6 * 24 + 10)
sync_blocks(self.nodes)
# counts = [ x.getblockcount() for x in self.nodes ]
self.nodes[1].setexcessiveblock(100000, 1) # not sure how big the txns will be but smaller than this
self.nodes[1].setminingmaxblock(100000) # not sure how big the txns will be but smaller than this
self.repeatTx(20, self.nodes[0], addr, .001)
base = self.nodes[0].getblockcount()
self.generateAndPrintBlock(self.nodes[0])
time.sleep(2) # give blocks a chance to fully propagate
sync_blocks(self.nodes[0:2])
counts = [x.getblockcount() for x in self.nodes]
assert_equal(counts, [base + 1, base + 1, base, base])
if self.extended:
logging.info("Random test")
randomRange = 3
else:
randomRange = 0
for i in range(0, randomRange):
logging.info("round %d" % i)
for n in self.nodes:
size = random.randint(1, 1000) * 1000
try: # since miningmaxblock must be <= excessiveblock, raising/lowering may need to run these in different order
n.setminingmaxblock(size)
n.setexcessiveblock(size, random.randint(0, 10))
except JSONRPCException:
n.setexcessiveblock(size, random.randint(0, 10))
n.setminingmaxblock(size)
addrs = [x.getnewaddress() for x in self.nodes]
ntxs = 0
for i in range(0, random.randint(1, 20)):
try:
n = random.randint(0, 3)
logging.info("%s: Send to %d" % (ntxs, n))
self.nodes[n].sendtoaddress(addrs[random.randint(0, 3)], .1)
ntxs += 1
except JSONRPCException: # could be spent all the txouts
pass
logging.info("%d transactions" % ntxs)
time.sleep(1) # allow txns a chance to propagate
self.nodes[random.randint(0, 3)].generate(1)
logging.info("mined a block")
# TODO: rather than sleeping we should really be putting a check in here
# based on what the random excessive seletions were from above
time.sleep(5) # allow block a chance to propagate
# the random test can cause disconnects if the block size is very large compared to excessive size
# so reconnect
interconnect_nodes(self.nodes)
if __name__ == '__main__':
if "--extensive" in sys.argv:
longTest = True
# we must remove duplicate 'extensive' arg here
while True:
try:
sys.argv.remove('--extensive')
except:
break
logging.info("Running extensive tests")
else:
longTest = False
ExcessiveBlockTest(longTest).main()
def info(type, value, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
# we are in interactive mode or we don't have a tty-like
# device, so we call the default hook
sys.__excepthook__(type, value, tb)
else:
import traceback
import pdb
# we are NOT in interactive mode, print the exception...
traceback.print_exception(type, value, tb)
print
# ...then start the debugger in post-mortem mode.
pdb.pm()
sys.excepthook = info
def Test():
t = ExcessiveBlockTest()
t.drop_to_pdb = True
bitcoinConf = {
"debug": ["rpc", "net", "blk", "thin", "mempool", "req", "bench", "evict"],
"blockprioritysize": 2000000, # we don't want any transactions rejected due to insufficient fees...
"blockminsize": 1000000
}
flags = standardFlags()
t.main(flags, bitcoinConf, None)
| mit | -6,959,768,802,692,988,000 | 40.173913 | 155 | 0.600282 | false |
UCSD-E4E/radio_collar_tracker_drone | scripts/ground_control_software/heatMap.py | 1 | 1174 | #!/usr/bin/env python3
import generateKML
import pos_estimate
import numpy as np
import utm
def findMax( someList ):
tempMax = someList[0]
for i in someList:
if tempMax < i:
tempMax = i
return tempMax
def findMin( someList ):
tempMin = someList[0]
for i in someList:
if tempMin > i:
tempMin = i
return tempMin
# data is in form [[x,y,z,rd],[x,y,z,rd],...] in utm
def generateHeatMap( data ):
minHeatDim = [ int( min( data[:,1] ) ), int( min( data[:,0] ) ) ]
maxHeatDim = [ int( max( data[:,1] ) ), int( max( data[:,0] ) ) ]
heatMap = np.zeros(( maxHeatDim[0] - minHeatDim[0] + 1, \
maxHeatDim[1] - minHeatDim[1] + 1 ))
for x, y, z, rd in data:
        heatMap[int(y-minHeatDim[0]),int(x-minHeatDim[1])] = 1
zonenum = data.getUTMZone[0]
zone = data.getUTMZone[1]
coords = [[minHeatDim[0],maxHeatDim[1]],
[maxHeatDim[0],maxHeatDim[1]],
[maxHeatDim[0],minHeatDim[1]],
[minHeatDim[0],minHeatDim[1]]]
ll = [utm.to_latlon( x[0], x[1], zonenum, zone_letter=zone ) for x in coords]
ll = [ [x[1],x[0]] for x in ll ]
testKML = generateKML.kmlPackage( "NOTICE", estimate, [heatMap, ll] )
generateKML.generateKML( [ testKML ] )
| gpl-3.0 | 1,732,163,343,495,752,000 | 26.952381 | 78 | 0.626917 | false |
kooksee/myblog | BAE.py | 1 | 11250 | # -*- coding=utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import tornado.wsgi
import tornado.options
import os.path,os,datetime,sys,time,codecs
import markdown
import tohtml
import db
import json
import tohtml
import base64
import uuid
def conf(): # global site configuration info
global NAME,Subtitle,description,keywords,Category,UUID
conf = db.db("SELECT SITENAME,subtitle,description,keywords,uuid FROM CONFIG")[0]
NAME = conf[0]
Subtitle = conf[1]
description = conf[2]
keywords = conf[3]
UUID= conf[4]
if not UUID:
UUID=base64.b64encode(uuid.uuid4().bytes + uuid.uuid4().bytes)
print db.exe("UPDATE config SET uuid='%s' WHERE ID=1" % UUID)
Category = [(i[0],i[1]) for i in db.db("SELECT ID,Category FROM Category")]
Category.append((' ',' '))
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
conf()
settings = {
"static_path" : os.path.join(os.path.dirname(__file__), "./static/"),
"template_path" : os.path.join(os.path.dirname(__file__), "./templates/"),
"cookie_secret" : UUID,
"xsrf_cookies" : True,
"login_url": "/login"}
class index(tornado.web.RequestHandler):
def get(self):
self.redirect('/index.html')
class static(tornado.web.RequestHandler):
def get(self,url):
self.write(codecs.open('./html/%s' % url,'r','utf-8').read())
class LoginHandler(tornado.web.RequestHandler):
def get_current_user(self):
return self.get_secure_cookie("WLBLOG")
class Manager(LoginHandler):
@tornado.web.authenticated
def post(self):
pass
@tornado.web.authenticated
def get(self):
conf()
s = db.db("SELECT ID,TITLE,CREATETIME,LEIBIE FROM MY")
LIST = [(i[0],i[1],i[2],i[3]) for i in s]
self.render("admin.html",LIST = LIST,title=NAME,tags='tags',NAME=NAME,Category = Category,Subtitle = Subtitle,description=description,keywords=keywords,)
class Edit(LoginHandler): # article create/edit and category management
@tornado.web.authenticated
def post(self):
TYPE=self.get_argument('TYPE','')
ID=self.get_argument('ID','')
subject = self.get_argument('subject','')
tags = self.get_argument('tags','')
markdown = self.get_argument('markdown','')
Category = self.get_argument('Category','')
if TYPE=='DEL':
Category = Category[0:-1]
SQL = "DELETE FROM Category WHERE ID IN (%s)" % Category
self.write(db.exe(SQL))
elif TYPE=='NEW':
SQL = "INSERT INTO Category (Category) VALUES ('%s')" % Category
self.write(db.exe(SQL))
elif None or "" in (subject,tags,markdown):
self.write(u"主题、标签、类别及内容均不可为空!")
else:
if db.edit(TYPE,subject.encode("utf-8"),tags.encode("utf-8"),markdown.encode("utf-8"),Category.encode("utf-8"),ID):
tohtml.html().ALL()
self.write(u'OK,Thanks!')
else:
self.write(u'Error!')
@tornado.web.authenticated
def get(self):
conf()
markdown = tags = subject = LEIBIE = ID = ''
ID = self.get_argument('id','')
TYPE = self.get_argument('TYPE','')
if ID:
data=db.MARKDOWN(ID)
subject=data[0]
markdown=data[2].replace('\'\'','\'').replace('\\\\','\\')
tags=data[1]
LEIBIE = data[3]
else:
TYPE="ADD"
self.render("Edit.html",markdown=markdown,
subject=subject,
tags=tags,
title=NAME,
NAME=NAME,
description=description,
keywords=keywords,
Category = Category,
Subtitle = Subtitle,
LEIBIE = LEIBIE,
TYPE = TYPE,ID=ID)
class delete(LoginHandler): # article deletion
@tornado.web.authenticated
def get(self):
ID=self.get_argument('ID','')
if db.delete(ID):
tohtml.html().ALL()
os.remove("./html/%s.html" % ID)
self.write("0")
else:
self.write("数据库异常,刪除失败!")
class update(LoginHandler): # global site settings update
@tornado.web.authenticated
def post(self):
Result = True
NAME=self.get_argument('bkname','')
Subtitle=self.get_argument('subtitle','')
description=self.get_argument('description','')
keywords=self.get_argument('keywords','')
try:
db.db("update CONFIG SET SITENAME='%s',subtitle='%s',description='%s',keywords='%s' WHERE ID=1 " % (NAME,Subtitle,description,keywords))
except:
self.write("ERROR")
else:
tohtml.html().ALL()
self.write("0")
class userupdate(LoginHandler): # user management
@tornado.web.authenticated
def post(self):
user = self.get_secure_cookie("WLBLOG")
username=self.get_argument('newuser','')
oldpwd=self.get_argument('oldpwd','')
pwd1=self.get_argument('pwd1','')
if db.check(user,oldpwd):
if not username:
username=user
db.db("UPDATE Ver SET PASSWORD='%s',USERNAME='%s' WHERE USERNAME='%s'" % (pwd1,username,user))
self.write("0")
else:
self.write("密码修改失败,请确认你的输入!")
class custom(LoginHandler): # manages blogroll links, analytics code, Duoshuo comments and article footer content
@tornado.web.authenticated
def get(self):
conf()
try:
DUOSHUO = db.db("SELECT DUOSHUO FROM Ver")[0][0]
except:
DUOSHUO = ''
NAV = db.db("SELECT ID,NAME,LINK FROM LINK WHERE TYPE='nav'")
LINK = db.db("SELECT ID,NAME,LINK FROM LINK WHERE TYPE='link'")
LAST = db.db("SELECT ID,NAME,Remark,HTML FROM Other WHERE LOCATION='last'")
self.render("custom.html",title=NAME,NAME=NAME,
Category = Category,
Subtitle = Subtitle,
description=description,
keywords=keywords,DUOSHUO = DUOSHUO,NAV = NAV,
LINK = LINK,LAST = LAST)
def post(self):
CMD = self.get_argument('CMD','')
ID = self.get_argument('ID','')
name = self.get_argument('name','')
TYPE = self.get_argument('TYPE','')
remark = self.get_argument('remark','')
HTML = self.get_argument('EHTML','')
LINK = self.get_argument('LINK','')
if CMD=='DEL':
if TYPE in ('NAV','LINK'):
try:
db.db("DELETE FROM LINK WHERE ID='%s' " % ID)
except:
pass
elif TYPE=='LAST':
try:
db.db("DELETE FROM Other WHERE ID='%s' " % ID)
except:
pass
tohtml.html().ALL()
self.redirect('/custom')
elif CMD=='UP':
if TYPE=="LAST":
db.db("UPDATE Other SET NAME='%s',HTML='%s',Remark='%s' WHERE ID='%s'" % (name,HTML.replace('\'','\'\'').replace('\\','\\\\'),remark,ID))
elif TYPE in ('NAV','LINK'):
db.db("UPDATE LINK SET NAME='%s',LINK='%s' WHERE ID='%s'" % (name,LINK,ID))
tohtml.html().ALL()
self.redirect('/custom')
elif CMD=='NEW':
if TYPE=="LAST":
db.db("INSERT INTO Other (NAME,HTML,Remark,LOCATION,TYPE) VALUES ('%s','%s','%s','%s','belong')" % (name,HTML.replace('\'','\'\'').replace('\\','\\\\'),remark,TYPE.lower()))
elif TYPE in ('NAV','LINK'):
db.db("INSERT INTO LINK (NAME,LINK,TYPE) VALUES ('%s','%s','%s')" % (name,LINK,TYPE.lower()))
tohtml.html().ALL()
self.redirect('/custom')
elif CMD == 'HTML':
try:
HTML = db.db("SELECT HTML FROM Other WHERE ID='%s' " % ID)[0][0]
except:
pass
else:
self.write(HTML.strip().replace('\'\'','\'').replace('\\\\','\\'))
elif CMD=="DUOSHUO":
try:
db.db("UPDATE Ver SET DUOSHUO='%s' WHERE ID='1' " % name)
except Exception as e:
self.write("设定失败,原因:%s" % e)
else:
tohtml.html().ALL()
self.write("多说ID已成功设定为:%s" % name)
elif CMD=="JS":
if TYPE=='CX':
try:
JSCODE = db.db("SELECT HTML FROM Other WHERE NAME='JSCODE' ")[0][0]
except:
self.write('')
else:
self.write(JSCODE.replace('\'\'','\'').replace('\\\\','\\'))
elif TYPE=='UP':
try:
db.db("UPDATE Other SET HTML='%s' WHERE NAME='JSCODE'" % HTML.replace('\'','\'\'').replace('\\','\\\\'))
except Exception as e:
self.write(u'修改失败!')
else:
tohtml.html().ALL()
self.write(u'修改成功!')
class generate(LoginHandler):
@tornado.web.authenticated
def get(self):
tohtml.html().ALL()
self.redirect('/')
class upload(LoginHandler):
@tornado.web.authenticated
def post(self):
upload_path=os.path.join(os.path.dirname(__file__),'static/image/')
file_metas = self.request.files['editormd-image-file']
filename = ''
for meta in file_metas:
filename=time.strftime("%Y%m%d%H%M%S", time.localtime()) + meta['filename']
filepath=os.path.join(upload_path,filename)
with open(filepath,'wb') as up:
up.write(meta['body'])
print filename
s = {'success':1,'message': 'OK','url':'static/image/%s' % filename}
self.write(json.dumps(s))
class login(tornado.web.RequestHandler):
def get(self):
conf()
if self.get_secure_cookie("WLBLOG"):
self.redirect("/admin")
else:
self.render("login.html",title=NAME,NAME=NAME,
Category = Category,
Subtitle = Subtitle,
description=description,
keywords=keywords,)
def post(self):
username = self.get_argument('username','')
password = self.get_argument('password','')
if db.check(username,password):
self.set_secure_cookie("WLBLOG",username)
self.write("1")
else:
self.write("0")
class logout(tornado.web.RequestHandler):
def get(self):
self.clear_all_cookies()
self.redirect("/admin")
App = tornado.wsgi.WSGIApplication([
(r'/',index),
(r'/(.*\.html$)',static),
(r'/admin',Manager),
(r'/edit',Edit),
(r'/del',delete),
(r'/update',update),
(r'/upload',upload),
(r'/userupdate',userupdate),
(r'/custom',custom),
(r'/generate',generate),
(r'/login',login),
(r'/logout',logout)
],**settings)
from bae.core.wsgi import WSGIApplication
application = WSGIApplication(App)
| gpl-2.0 | 1,647,006,971,386,371,600 | 37.166667 | 190 | 0.525109 | false |
tmetsch/pyssf | docs/source/conf.py | 1 | 8255 | # -*- coding: utf-8 -*-
#
# pyssf documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 25 10:29:07 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Service Sharing Facility'
copyright = u'2010-2012, Platform Computing'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4.6'
# The full version, including alpha/beta/rc tags.
release = '0.4.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Service Sharing Facility'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'images/pyssf_logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ServiceSharingFacilitydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyssf.tex', u'Service Sharing Facility Documentation',
u'Platform Computing', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'images/pyssf_logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyssf', u'pyssf Documentation',
[u'Platform Computing'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'pyssf'
epub_author = u'Platform Computing'
epub_publisher = u'Platform Computing'
epub_copyright = u'2010-2012, Platform Computing'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| lgpl-3.0 | 2,553,244,000,291,390,500 | 31.5 | 80 | 0.709267 | false |
reclosedev/mitm_relay | socket_relay.py | 1 | 6570 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import select
import logging
log = logging.getLogger(__name__)
class Server:
def __init__(self, relays, timeout=0.3):
self._relays = list(relays)
self.timeout = timeout
self.input_map = {}
self.links = {}
def main_loop(self):
for relay in self._relays:
self.add_relay(relay)
while True:
rlist, _, _ = select.select(self.input_map, [], [], self.timeout)
#log.debug("%s %s", len(rlist), len(self.input_map))
for sock in rlist:
obj = self.input_map[sock]
#log.debug("SO: %s, %s", sock, obj)
if isinstance(obj, Relay):
pipes = obj.new_client()
for pipe in pipes:
self.input_map[pipe.input_socket] = pipe
self.links[pipes[0]] = pipes[1]
self.links[pipes[1]] = pipes[0]
elif isinstance(obj, Pipe):
obj.on_read()
self.close_link_if_finished(obj)
def add_relay(self, relay):
self.input_map[relay.listen_socket] = relay
relay.listen()
def close_link_if_finished(self, pipe1):
if pipe1.work_done:
self.input_map.pop(pipe1.input_socket, None)
else:
return
pipe2 = self.links.get(pipe1)
if not (pipe2 and pipe2.work_done):
return
for pipe in pipe1, pipe2:
pipe.close()
self.links.pop(pipe, None)
self.input_map.pop(pipe.input_socket, None)
class Relay(object):
def __init__(self, listen_port, target_host=None, to_port=None, listen_host="127.0.0.1", backlog=200,
input_transform=None, output_transform=None):
self.listen_port = listen_port
self.target_host = target_host or listen_host
self.target_port = to_port or listen_port
self.listen_host = listen_host
self.listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.backlog = backlog
self.input_transform = input_transform
self.output_transform = output_transform
def listen(self):
log.info("%s listen", self)
self.listen_socket.bind((self.listen_host, self.listen_port))
self.listen_socket.listen(self.backlog)
def _accept_client(self):
client_socket, client_address = self.listen_socket.accept()
log.info("New client %s:%s", *client_address)
return client_socket
def _connect_upstream(self):
upstream_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
log.info("Connecting to %s:%s", self.target_host, self.target_port)
upstream_socket.connect((self.target_host, self.target_port))
return upstream_socket
def new_client(self):
client_socket = self._accept_client()
upstream_socket = self._connect_upstream()
log.debug("Create pipes")
receiver = Pipe(self, client_socket, upstream_socket, transform=self.input_transform)
sender = Pipe(self, upstream_socket, client_socket, transform=self.output_transform)
return receiver, sender
def __repr__(self):
return "<%s(%s, %s, %s)>" % (self.__class__.__name__, self.listen_port, self.target_host, self.target_port)
class ProxiedRelay(Relay):
def __init__(self, proxy_host, proxy_port, *args, **kwargs):
super(ProxiedRelay, self).__init__(*args, **kwargs)
self.proxy_host = proxy_host
self.proxy_port = proxy_port
def _connect_upstream(self):
upstream_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
log.info("Connecting to proxy %s:%s", self.proxy_host, self.proxy_port)
upstream_socket.connect((self.proxy_host, self.proxy_port))
data = "CONNECT %s:%d HTTP/1.0\r\n\r\n" % (self.target_host, self.target_port)
data = data.encode("ascii")
log.debug("Proxy query: %r", data)
upstream_socket.sendall(data)
fp = upstream_socket.makefile("rb")
while True:
data = fp.readline()
if data in (b"", b"\n", b"\r\n"):
break
log.debug("Proxy response: %r", data)
return upstream_socket
class Pipe(object):
data_debug = 1
def __init__(self, relay, input_socket, output_socket,
buffer_size=1024 * 1024, transform=None):
self.relay = relay
self.input_socket = input_socket
self.output_socket = output_socket
self.buffer_size = buffer_size
self.transform = transform
self.input_peername = self.input_socket.getpeername()
self.output_peername = self.output_socket.getpeername()
self.work_done = False
def on_read(self):
try:
data = self.input_socket.recv(self.buffer_size)
except socket.error:
log.exception("%s exception in recv():", self)
self.work_done = True
return
if not data:
if self.data_debug:
log.debug("%s no data received", self)
self.work_done = True
return
if self.data_debug:
log.debug("%s data: %r", self, data)
if self.transform:
data = self.transform(data)
if not data:
return
try:
self.output_socket.sendall(data)
except socket.error:
log.exception("%s exception in sendall():", self)
self.work_done = True
def close(self):
log.info("%s closing", self)
self.input_socket.close()
self.output_socket.close()
def __repr__(self):
return "<Pipe(%s, %s)>" % (self.input_peername, self.output_peername)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format="%(levelname)s %(message)s")
def in_transform(data):
print("INPUT TRANSFORM %r" % data)
return data.replace(b"/ip", b"/cookies")
def out_transform(data):
print("OUTPUT TRANSFORM %r" % data)
return data + b"transformed"
server = Server([
Relay(8080, "httpbin.org", 80, input_transform=in_transform, output_transform=out_transform),
ProxiedRelay("127.0.0.1", 8888, 9080, "httpbin.org", 80)
])
try:
server.main_loop()
except KeyboardInterrupt:
print("Stopping server...")
| mit | -6,196,941,404,122,939,000 | 31.524752 | 115 | 0.576712 | false |
erpletzerp/letzerpcore | frappe/core/doctype/user/user.py | 1 | 15029 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, now, get_gravatar
from frappe import throw, msgprint, _
from frappe.auth import _update_password
from frappe.desk.notifications import clear_notifications
import frappe.permissions
STANDARD_USERS = ("Guest", "Administrator")
from frappe.model.document import Document
class User(Document):
def autoname(self):
"""set name as email id"""
if self.name not in STANDARD_USERS:
self.email = self.email.strip()
self.name = self.email
def validate(self):
self.in_insert = self.get("__islocal")
if self.name not in STANDARD_USERS:
self.validate_email_type(self.email)
self.add_system_manager_role()
self.validate_system_manager_user_type()
self.check_enable_disable()
self.update_gravatar()
self.ensure_unique_roles()
self.remove_all_roles_for_guest()
if self.language == "Loading...":
self.language = None
def check_enable_disable(self):
# do not allow disabling administrator/guest
if not cint(self.enabled) and self.name in STANDARD_USERS:
frappe.throw(_("User {0} cannot be disabled").format(self.name))
if not cint(self.enabled):
self.a_system_manager_should_exist()
# clear sessions if disabled
if not cint(self.enabled) and getattr(frappe.local, "login_manager", None):
frappe.local.login_manager.logout(user=self.name)
def add_system_manager_role(self):
# if adding system manager, do nothing
if not cint(self.enabled) or ("System Manager" in [user_role.role for user_role in
self.get("user_roles")]):
return
if self.name not in STANDARD_USERS and self.user_type == "System User" and not self.get_other_system_managers():
msgprint(_("Adding System Manager to this User as there must be atleast one System Manager"))
self.append("user_roles", {
"doctype": "UserRole",
"role": "System Manager"
})
def validate_system_manager_user_type(self):
#if user has system manager role then user type should be system user
if ("System Manager" in [user_role.role for user_role in
self.get("user_roles")]) and self.get("user_type") != "System User":
frappe.throw(_("User with System Manager Role should always have User Type: System User"))
def email_new_password(self, new_password=None):
if new_password and not self.in_insert:
_update_password(self.name, new_password)
self.password_update_mail(new_password)
frappe.msgprint(_("New password emailed"))
def on_update(self):
# owner is always name
frappe.db.set(self, 'owner', self.name)
# clear new password
new_password = self.new_password
self.db_set("new_password", "")
clear_notifications(user=self.name)
frappe.clear_cache(user=self.name)
try:
if self.in_insert:
if self.name not in STANDARD_USERS:
if new_password:
# new password given, no email required
_update_password(self.name, new_password)
if not getattr(self, "no_welcome_mail", False):
self.send_welcome_mail()
msgprint(_("Welcome email sent"))
return
else:
self.email_new_password(new_password)
except frappe.OutgoingEmailError:
pass # email server not set, don't send email
def update_gravatar(self):
if not self.user_image:
self.user_image = get_gravatar(self.name)
@Document.hook
def validate_reset_password(self):
pass
def reset_password(self):
from frappe.utils import random_string, get_url
key = random_string(32)
self.db_set("reset_password_key", key)
self.password_reset_mail(get_url("/update-password?key=" + key))
def get_other_system_managers(self):
return frappe.db.sql("""select distinct user.name from tabUserRole user_role, tabUser user
where user_role.role='System Manager'
and user.docstatus<2
and ifnull(user.enabled,0)=1
and user_role.parent = user.name
and user_role.parent not in ('Administrator', %s) limit 1""", (self.name,))
def get_fullname(self):
"""get first_name space last_name"""
return (self.first_name or '') + \
(self.first_name and " " or '') + (self.last_name or '')
def password_reset_mail(self, link):
self.send_login_mail(_("Password Reset"), "templates/emails/password_reset.html", {"link": link})
def password_update_mail(self, password):
self.send_login_mail(_("Password Update"), "templates/emails/password_update.html", {"new_password": password})
def send_welcome_mail(self):
from frappe.utils import random_string, get_url
key = random_string(32)
self.db_set("reset_password_key", key)
link = get_url("/update-password?key=" + key)
self.send_login_mail(_("Verify Your Account"), "templates/emails/new_user.html", {"link": link})
def send_login_mail(self, subject, template, add_args):
"""send mail with login details"""
from frappe.utils.user import get_user_fullname
from frappe.utils import get_url
mail_titles = frappe.get_hooks().get("login_mail_title", [])
title = frappe.db.get_default('company') or (mail_titles and mail_titles[0]) or ""
full_name = get_user_fullname(frappe.session['user'])
if full_name == "Guest":
full_name = "Administrator"
args = {
'first_name': self.first_name or self.last_name or "user",
'user': self.name,
'title': title,
'login_url': get_url(),
'user_fullname': full_name
}
args.update(add_args)
sender = frappe.session.user not in STANDARD_USERS and frappe.session.user or None
frappe.sendmail(recipients=self.email, sender=sender, subject=subject,
message=frappe.get_template(template).render(args))
def a_system_manager_should_exist(self):
if not self.get_other_system_managers():
throw(_("There should remain at least one System Manager"))
def on_trash(self):
frappe.clear_cache(user=self.name)
if self.name in STANDARD_USERS:
throw(_("User {0} cannot be deleted").format(self.name))
self.a_system_manager_should_exist()
# disable the user and log him/her out
self.enabled = 0
if getattr(frappe.local, "login_manager", None):
frappe.local.login_manager.logout(user=self.name)
# delete their password
frappe.db.sql("""delete from __Auth where user=%s""", (self.name,))
# delete todos
frappe.db.sql("""delete from `tabToDo` where owner=%s""", (self.name,))
frappe.db.sql("""update tabToDo set assigned_by=null where assigned_by=%s""",
(self.name,))
# delete events
frappe.db.sql("""delete from `tabEvent` where owner=%s
and event_type='Private'""", (self.name,))
frappe.db.sql("""delete from `tabEvent User` where person=%s""", (self.name,))
# delete messages
frappe.db.sql("""delete from `tabComment` where comment_doctype='Message'
and (comment_docname=%s or owner=%s)""", (self.name, self.name))
def before_rename(self, olddn, newdn, merge=False):
frappe.clear_cache(user=olddn)
self.validate_rename(olddn, newdn)
def validate_rename(self, olddn, newdn):
# do not allow renaming administrator and guest
if olddn in STANDARD_USERS:
throw(_("User {0} cannot be renamed").format(self.name))
self.validate_email_type(newdn)
def validate_email_type(self, email):
from frappe.utils import validate_email_add
email = email.strip()
if not validate_email_add(email):
throw(_("{0} is not a valid email id").format(email))
def after_rename(self, olddn, newdn, merge=False):
tables = frappe.db.sql("show tables")
for tab in tables:
desc = frappe.db.sql("desc `%s`" % tab[0], as_dict=1)
has_fields = []
for d in desc:
if d.get('Field') in ['owner', 'modified_by']:
has_fields.append(d.get('Field'))
for field in has_fields:
frappe.db.sql("""\
update `%s` set `%s`=%s
where `%s`=%s""" % \
(tab[0], field, '%s', field, '%s'), (newdn, olddn))
# set email
frappe.db.sql("""\
update `tabUser` set email=%s
where name=%s""", (newdn, newdn))
# update __Auth table
if not merge:
frappe.db.sql("""update __Auth set user=%s where user=%s""", (newdn, olddn))
def add_roles(self, *roles):
for role in roles:
if role in [d.role for d in self.get("user_roles")]:
continue
self.append("user_roles", {
"doctype": "UserRole",
"role": role
})
self.save()
def remove_roles(self, *roles):
existing_roles = dict((d.role, d) for d in self.get("user_roles"))
for role in roles:
if role in existing_roles:
self.get("user_roles").remove(existing_roles[role])
self.save()
def remove_all_roles_for_guest(self):
if self.name == "Guest":
self.set("user_roles", list(set(d for d in self.get("user_roles") if d.role == "Guest")))
def ensure_unique_roles(self):
exists = []
for i, d in enumerate(self.get("user_roles")):
if (not d.role) or (d.role in exists):
self.get("user_roles").remove(d)
else:
exists.append(d.role)
@frappe.whitelist()
def get_languages():
from frappe.translate import get_lang_dict
import pytz
languages = get_lang_dict().keys()
languages.sort()
return {
"languages": [""] + languages,
"timezones": pytz.all_timezones
}
@frappe.whitelist()
def get_all_roles(arg=None):
"""return all roles"""
return [r[0] for r in frappe.db.sql("""select name from tabRole
where name not in ('Administrator', 'Guest', 'All') order by name""")]
@frappe.whitelist()
def get_user_roles(arg=None):
"""get roles for a user"""
return frappe.get_roles(frappe.form_dict['uid'])
@frappe.whitelist()
def get_perm_info(arg=None):
"""get permission info"""
return frappe.db.sql("""select * from tabDocPerm where role=%s
and docstatus<2 order by parent, permlevel""", (frappe.form_dict['role'],), as_dict=1)
@frappe.whitelist(allow_guest=True)
def update_password(new_password, key=None, old_password=None):
# verify old password
if key:
user = frappe.db.get_value("User", {"reset_password_key":key})
if not user:
return _("Cannot Update: Incorrect / Expired Link.")
elif old_password:
user = frappe.session.user
if not frappe.db.sql("""select user from __Auth where password=password(%s)
and user=%s""", (old_password, user)):
return _("Cannot Update: Incorrect Password")
_update_password(user, new_password)
frappe.db.set_value("User", user, "reset_password_key", "")
frappe.local.login_manager.logout()
return _("Password Updated")
@frappe.whitelist(allow_guest=True)
def sign_up(args):
args=eval(args)
from frappe.utils import get_url, cstr
import json
import requests
if get_url()=='http://demo.letzerp.com':
#frappe.errprint(['url',get_url()])
#frappe.db.sql("""insert into `tabDemo Sites` (email,full_name,domain_name,company_name) values(%s,%s,%s,%s);""",(args['email'],args['full_name'],args['subdomain'],args['company_name']))
s = requests.session()
login_details = {'usr': 'administrator', 'pwd': 'admin'}
url = 'http://letzerp.com/api/method/[email protected]&pwd=password'
headers = {'content-type': 'application/x-www-form-urlencoded'}
#frappe.errprint([url, 'data='+json.dumps(login_details)])
response = s.post(url)
url='http://letzerp.com/api/resource/Lead/?fields=["domain_name", "name"]&filters=[["Lead", "domain_name", "=", "%s"]]'%(args['subdomain']+'.letzerp.com')
		# use a distinct name so the `requests` module imported above is not shadowed
		lead_response = s.get(url, headers=headers)
		if lead_response.text:
			frappe.errprint(lead_response.text)
			lead_dict = json.loads(lead_response.text)
if len(lead_dict['data']) > 0 :
return (_("Domain already exist with same name..Please choose another domain..!"))
else:
url = 'http://letzerp.com/api/resource/Lead'
headers = {'content-type': 'application/x-www-form-urlencoded'}
data={}
data['lead_name']=args['full_name']
data['company_name']=args['company_name']
data['email_id']=args['email']
data['domain_name']=args['subdomain']+'.letzerp.com'
# frappe.errprint('data='+json.dumps(data))
response = s.post(url, data='data='+json.dumps(data), headers=headers)
# frappe.errprint(response.text)
return (_("Registration Details will be send on your email id soon. "))
@frappe.whitelist(allow_guest=True)
def reset_password(user):
if user=="Administrator":
return _("Not allowed to reset the password of {0}").format(user)
try:
user = frappe.get_doc("User", user)
user.validate_reset_password()
user.reset_password()
return _("Password reset instructions have been sent to your email")
except frappe.DoesNotExistError:
return _("User {0} does not exist").format(user)
def user_query(doctype, txt, searchfield, start, page_len, filters):
from frappe.desk.reportview import get_match_cond
txt = "%{}%".format(txt)
return frappe.db.sql("""select name, concat_ws(' ', first_name, middle_name, last_name)
from `tabUser`
where ifnull(enabled, 0)=1
and docstatus < 2
and name not in ({standard_users})
and user_type != 'Website User'
and ({key} like %s
or concat_ws(' ', first_name, middle_name, last_name) like %s)
{mcond}
order by
case when name like %s then 0 else 1 end,
case when concat_ws(' ', first_name, middle_name, last_name) like %s
then 0 else 1 end,
name asc
limit %s, %s""".format(standard_users=", ".join(["%s"]*len(STANDARD_USERS)),
key=searchfield, mcond=get_match_cond(doctype)),
tuple(list(STANDARD_USERS) + [txt, txt, txt, txt, start, page_len]))
def get_total_users(exclude_users=None):
"""Returns total no. of system users"""
return len(get_system_users(exclude_users=exclude_users))
def get_system_users(exclude_users=None):
if not exclude_users:
exclude_users = []
elif not isinstance(exclude_users, (list, tuple)):
exclude_users = [exclude_users]
exclude_users += list(STANDARD_USERS)
system_users = frappe.db.sql_list("""select name from `tabUser`
where enabled=1 and user_type != 'Website User'
and name not in ({})""".format(", ".join(["%s"]*len(exclude_users))),
exclude_users)
return system_users
def get_active_users():
"""Returns No. of system users who logged in, in the last 3 days"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type != 'Website User'
and name not in ({})
and hour(timediff(now(), last_login)) < 72""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS)[0][0]
def get_website_users():
"""Returns total no. of website users"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type = 'Website User'""")[0][0]
def get_active_website_users():
"""Returns No. of website users who logged in, in the last 3 days"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type = 'Website User'
and hour(timediff(now(), last_login)) < 72""")[0][0]
def get_permission_query_conditions(user):
if user=="Administrator":
return ""
else:
return """(`tabUser`.name not in ({standard_users}))""".format(
standard_users='"' + '", "'.join(STANDARD_USERS) + '"')
def has_permission(doc, user):
if (user != "Administrator") and (doc.name in STANDARD_USERS):
# dont allow non Administrator user to view / edit Administrator user
return False
else:
return True
| mit | 3,309,310,681,578,158,600 | 32.621924 | 188 | 0.682214 | false |
edm1/error-aware-demultiplexer | src/demultiplexer.py | 1 | 16772 | # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Edward Mountjoy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from src.probabilisticSeqMatch import sequences_match_prob
from src.probabilisticSeqMatch import base_prob
from src.fastqparser import phred_score_dict
from src.fastqparser import fastqIterator
from src.fastqparser import Fastq
from src.fastqparser import fastqWriter
from src.progressbar import Bar
from operator import itemgetter
from datetime import timedelta
from shutil import rmtree
import glob
import gzip
import sys
import os
#import concurrent.futures as cf
def run(args):
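    """ Demultiplexes reads in <inDir>/multiplexed into per-sample fastq files,
        matching read barcodes against sample-sheet indexes probabilistically.
    """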
print("Precomputing base probabilities...")
# Precompute string to phred scores dictionary
phred_dict = phred_score_dict(args.phredOffset)
# Precompute base probabilities for phredscores up to 50
base_prob_precompute = {}
for letter in phred_dict:
base_prob_precompute[letter] = base_prob(phred_dict[letter])
# Convert index qual argument to a qual character
args.indexQual = chr(args.indexQual + args.phredOffset)
print("Searching for fastqs...")
# Check that the multiplexed path exists
multiplexed_dir = os.path.join(args.inDir, "multiplexed")
if not os.path.exists(multiplexed_dir):
sys.exit("Directory '<inDir>/multiplexed' does not exist. Re-run with"
" different <inDir>")
# Create out directory
out_dir = "demultiplexed"
if args.uniqID != None:
out_dir += "_{0}".format(args.uniqID)
out_dir = os.path.join(args.inDir, out_dir)
create_folder(out_dir)
# Initiate multiplexed class
multiplexed = Multiplex(multiplexed_dir)
print("Loading index sequences...")
# Initiate sample sheet and read possible indexes
sampleSheet = SampleSheet(args.sampleSheet)
sampleSheet.parse(args.indexQual, base_prob_precompute)
# Check that there are the same number of indexes in sample sheet and
# multiplexed fastqs
if sampleSheet.is_dualindexed != multiplexed.is_dualindexed:
sys.exit("Error: Different number of indexes in sampleSheet and "
"multiplexed reads. Exiting!")
print("Initiating...")
# Open output class for each sample, and a not_assigned group
sample_out = {}
for sample in list(sampleSheet.sample_indexes.keys()) + ['not_assigned']:
sample_out[sample] = Sample(sample, out_dir, multiplexed.is_pairend,
multiplexed.is_dualindexed)
# Initiate progress bar
num_records = file_len(multiplexed.barcode_paths[0]) / 4
bar = Bar('Demultiplexing', max=int(num_records/10000),
suffix='%(percent)d%% %(eta)a secs')
c = 1
for variables in futures_iterate_reads(base_prob_precompute,
multiplexed, sampleSheet, args.minProb):
# Get output
output = futures_barcode_to_indexes(variables)
# Unpack output
((read_records, barcode_records), sample, prob, _) = output
# Write record to correct sample file
sample_out[sample].write(read_records, barcode_records)
# Update progress
if c % 10000 == 0:
bar.next()
c += 1
# Close progress bar
bar.finish()
# Close all sample handles
for sample_name in sample_out:
sample_out[sample_name].close_handles()
print("Finished!")
"""
# Send each read/barcode record to futures to match up to sample
with cf.ProcessPoolExecutor(max_workers=args.numCPU) as executor:
c = 1
# Map read/barcode records
for output in executor.map(futures_barcode_to_indexes,
futures_iterate_reads(multiplexed, sampleSheet,
base_prob_precompute, args.minProb)):
# Unpack output
((read_records, barcode_records), sample, prob, _) = output
# Write record to correct sample file
sample_out[sample].write(read_records, barcode_records)
# Update progress
if c % 1000 == 0:
print(c)
c += 1
"""
return 0
def futures_iterate_reads(base_prob_precompute, multiplexed, sampleSheet,
min_prob):
""" Returns an iterator that contains everything needed for futures.
"""
for combined_record in multiplexed.iterate(base_prob_precompute):
yield (combined_record, sampleSheet, min_prob)
def futures_barcode_to_indexes(variables):
""" Compares the reads barcodes to sample indexes and returns matching
sample name.
"""
# Unpack variables
(combined_record, sampleSheet, min_prob) = variables
# Get barcode records
_, barcode_records = combined_record
# Find sample
b1_header, sample, prob = match_barcode_to_indexes(barcode_records,
sampleSheet, min_prob)
if sample == None:
sample = 'not_assigned'
# Append probability to barcode1 header
b1_header = "{0} {1}".format(b1_header, prob)
# Change header
combined_record[1][0].id = b1_header
return combined_record, sample, prob, b1_header
def match_barcode_to_indexes(barcode_records, sampleSheet, min_prob):
""" For the barcode pair, caluclates probability of a match against each set
of indexes
"""
index_probs = {}
for sample_name in sampleSheet.sample_indexes:
index_records = sampleSheet.sample_indexes[sample_name]
# Calculate the match probability for barcode 1
b1_prob = sequences_match_prob(index_records[0].seq,
index_records[0].qual_prob,
barcode_records[0].seq,
barcode_records[0].qual_prob, 0)
# Do for second barcode if present
if sampleSheet.is_dualindexed:
# Skip if already below the threshold, else assign same prob as b1
if b1_prob >= min_prob:
b2_prob = sequences_match_prob(index_records[1].seq,
index_records[1].qual_prob,
barcode_records[1].seq,
barcode_records[1].qual_prob, 0)
else:
b2_prob = b1_prob
# Caluclate combined probability
if sampleSheet.is_dualindexed:
overall_prob = b1_prob * b2_prob
else:
overall_prob = b1_prob
# Save result
index_probs[sample_name] = overall_prob
# Sort the results by their probability
sorted_probs = sorted(index_probs.items(), key=itemgetter(1),
reverse=True)
# Return header, sample, prob
header = barcode_records[0].id
if sorted_probs[0][1] > min_prob:
return header, sorted_probs[0][0], sorted_probs[0][1]
else:
return header, None, sorted_probs[0][1]
class Sample:
# Class for each possible sample. 1) Holds the output directory for that
# sample. 2) Opens handles. 3) Writes record to sample.
def __init__(self, name, out_dir, is_pe, id_dual):
self.read_paths = []
self.barcode_paths = []
self.read_handles = None
self.barcode_handles = None
# Create directory for sample
name = name.replace(' ', '_')
self.sample_dir = os.path.join(out_dir, name)
create_folder(self.sample_dir)
# Create read paths
self.read_paths.append(os.path.join(self.sample_dir,
'{0}.R1.fastq.gz'.format(name)))
if is_pe:
self.read_paths.append(os.path.join(self.sample_dir,
'{0}.R2.fastq.gz'.format(name)))
# Create barcode paths
self.barcode_paths.append(os.path.join(self.sample_dir,
'{0}.barcode_1.fastq.gz'.format(name)))
if id_dual:
self.barcode_paths.append(os.path.join(self.sample_dir,
'{0}.barcode_2.fastq.gz'.format(name)))
def open_handles(self):
""" For the reads and barcodes, opens output handles.
"""
self.read_handles = [get_handle(read_path, 'w') for read_path
in self.read_paths]
self.barcode_handles = [get_handle(barcode_path, 'w') for barcode_path
in self.barcode_paths]
return 0
def write(self, read_records, barcode_records):
""" Writes the demultiplexed read and barcode records to sample file.
"""
# Open handles if not open
if self.read_handles == None:
self.open_handles()
# Write read records
for i in range(len(read_records)):
fastqWriter(read_records[i], self.read_handles[i])
# Write barcode records
for i in range(len(barcode_records)):
fastqWriter(barcode_records[i], self.barcode_handles[i])
return 0
def close_handles(self):
""" Closes any open handles.
"""
if self.read_handles != None:
for handle in self.read_handles + self.barcode_handles:
handle.close()
return 0
class SampleSheet:
# Class to hold the sample sheet and retrieve indexes from it.
def __init__(self, path):
self.path = path
def parse(self, index_qual, base_prob_precompute):
""" Parses the sample sheet to retrieve the indexes for each sample.
"""
sample_indexes = {}
with open(self.path, 'r') as in_h:
# Skip to line after [Data]
line = in_h.readline()
while not line.startswith('[Data]'):
line = in_h.readline()
# Get header
header = in_h.readline().rstrip().lower().split(',')
col_ind = dict(zip(header, range(len(header))))
# Save whether it is dual indexed
if "index2" in col_ind.keys():
self.is_dualindexed = True
else:
self.is_dualindexed = False
# Get indexes
for line in in_h:
# Break if EOF
if line.strip() == "":
break
# Get info
parts = line.rstrip().split(',')
sample_name = parts[col_ind['sample_name']]
# If sample_name is empty, take sample_id instead
if sample_name == "":
sample_name = parts[col_ind['sample_id']]
# Get first index
index1 = parts[col_ind['index']]
sample_indexes[sample_name] = [index1]
# Get second index
if self.is_dualindexed:
index2 = parts[col_ind['index2']]
sample_indexes[sample_name].append(index2)
# Convert indexes to seqIO seqRecords
self.sample_indexes = self.convert_index_to_fastqRecord(sample_indexes,
index_qual, base_prob_precompute)
return 0
def convert_index_to_fastqRecord(self, sample_indexes, index_qual,
base_prob_precompute):
""" Converts each index sequence to a seqIO seqRecord.
"""
# For each sample
for sample in sample_indexes:
# For each index
for i in range(len(sample_indexes[sample])):
raw_seq = sample_indexes[sample][i]
qual = [index_qual] * len(raw_seq)
# Convert to fastqRecord
record = Fastq(None, raw_seq, qual)
# Calculate base probabilities
record.qual_to_prob(base_prob_precompute)
# Save record
sample_indexes[sample][i] = record
return sample_indexes
class Multiplex:
# Class for the folder of multiplexed reads + barcodes
def __init__(self, folder):
""" Make list of read and barcode files.
"""
self.dir = folder
# Get list of read and barcode paths
self.read_paths = []
self.barcode_paths = []
for fastq in sorted(glob.glob(os.path.join(folder, "*.fastq*"))):
if "barcode_" in os.path.split(fastq)[1]:
self.barcode_paths.append(fastq)
else:
self.read_paths.append(fastq)
# Save whether pairend
if len(self.read_paths) == 1:
self.is_pairend = False
elif len(self.read_paths) == 2:
self.is_pairend = True
else:
sys.exit("There must be 1 or 2 input read fastqs, not {0}".format(
len(self.read_paths)))
# Save whether dualindex
if len(self.barcode_paths) == 1:
self.is_dualindexed = False
elif len(self.barcode_paths) == 2:
self.is_dualindexed = True
else:
sys.exit("There must be 1 or 2 input barcode fastqs, not"
" {0}".format(len(self.barcode_paths)))
return None
def open_handles(self):
""" Opens the file names for reading.
"""
read_handles = [get_handle(filen, 'r') for filen in self.read_paths]
barcode_handles = [get_handle(filen, 'r') for filen
in self.barcode_paths]
return read_handles, barcode_handles
def open_iterators(self, read_handles, barcode_handles):
""" Opens fastq iterators using biopythons SeqIO
"""
# Open iterators for each handle
read_iterators = [fastqIterator(handle) for handle
in read_handles]
barcode_iterators = [fastqIterator(handle) for handle
in barcode_handles]
return read_iterators, barcode_iterators
def iterate(self, base_prob_precompute):
""" Loads the reads and barcode fastqs and yields 1 set at a time.
"""
# Open handles
read_handles, barcode_handles = self.open_handles()
# Open iterators for each handle
read_iterators, barcode_iterators = self.open_iterators(
read_handles, barcode_handles)
# Iterate through records
for r1_record in read_iterators[0]:
# Get read records
read_records = [r1_record]
if self.is_pairend:
read_records.append(next(read_iterators[1]))
# Get barcode records
barcode_records = [next(barcode_iterators[0])]
if self.is_dualindexed:
barcode_records.append(next(barcode_iterators[1]))
# Check that they all have the same title
titles = [record.id.split(" ")[0] for record in read_records + barcode_records]
if len(set(titles)) > 1:
sys.exit('Reads and/or barcodes are not in sync\n'
'{0}'.format(titles))
# Calculate base probabilities for barcodes
for i in range(len(barcode_records)):
barcode_records[i].qual_to_prob(base_prob_precompute)
yield [read_records, barcode_records]
# Close handles
for handle in read_handles + barcode_handles:
handle.close()
def create_folder(folder):
""" Check out folder exists and create a new one.
"""
# Check if it exists
if os.path.exists(folder):
response = input('{0} exists. Would you like to overwrite it? [y/n] '.format(folder))
if response == 'y':
rmtree(folder)
else:
sys.exit()
os.makedirs(folder)
return folder
def get_handle(filen, rw):
""" Returns file handle using gzip if file ends in .gz
"""
if filen.split('.')[-1] == 'gz':
return gzip.open(filen, rw)
else:
return open(filen, rw)
def file_len(fname):
""" Count number of lines in a file.
"""
with get_handle(fname, 'r') as f:
for i, l in enumerate(f):
pass
return i + 1
| mit | 5,872,654,845,560,061,000 | 35.30303 | 93 | 0.599273 | false |
t-wissmann/qutebrowser | tests/helpers/stubs.py | 1 | 17146 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2020 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=invalid-name,abstract-method
"""Fake objects/stubs."""
from unittest import mock
import contextlib
import shutil
import attr
from PyQt5.QtCore import pyqtSignal, QPoint, QProcess, QObject, QUrl
from PyQt5.QtGui import QIcon
from PyQt5.QtNetwork import (QNetworkRequest, QAbstractNetworkCache,
QNetworkCacheMetaData)
from PyQt5.QtWidgets import QCommonStyle, QLineEdit, QWidget, QTabBar
from qutebrowser.browser import browsertab, downloads
from qutebrowser.utils import usertypes
from qutebrowser.commands import runners
class FakeNetworkCache(QAbstractNetworkCache):
"""Fake cache with no data."""
def cacheSize(self):
return 0
def data(self, _url):
return None
def insert(self, _dev):
pass
def metaData(self, _url):
return QNetworkCacheMetaData()
def prepare(self, _metadata):
return None
def remove(self, _url):
return False
def updateMetaData(self, _url):
pass
class FakeKeyEvent:
"""Fake QKeyPressEvent stub."""
def __init__(self, key, modifiers=0, text=''):
self.key = mock.Mock(return_value=key)
self.text = mock.Mock(return_value=text)
self.modifiers = mock.Mock(return_value=modifiers)
class FakeWebFrame:
"""A stub for QWebFrame."""
def __init__(self, geometry=None, *, scroll=None, plaintext=None,
html=None, parent=None, zoom=1.0):
"""Constructor.
Args:
geometry: The geometry of the frame as QRect.
scroll: The scroll position as QPoint.
plaintext: Return value of toPlainText
            html: Return value of toHtml.
zoom: The zoom factor.
parent: The parent frame.
"""
if scroll is None:
scroll = QPoint(0, 0)
self.geometry = mock.Mock(return_value=geometry)
self.scrollPosition = mock.Mock(return_value=scroll)
self.parentFrame = mock.Mock(return_value=parent)
self.toPlainText = mock.Mock(return_value=plaintext)
self.toHtml = mock.Mock(return_value=html)
self.zoomFactor = mock.Mock(return_value=zoom)
class FakeChildrenFrame:
"""A stub for QWebFrame to test get_child_frames."""
def __init__(self, children=None):
if children is None:
children = []
self.childFrames = mock.Mock(return_value=children)
class FakeQApplication:
"""Stub to insert as QApplication module."""
UNSET = object()
def __init__(self, *, style=None, all_widgets=None, active_window=None,
instance=UNSET, arguments=None, platform_name=None):
if instance is self.UNSET:
self.instance = mock.Mock(return_value=self)
else:
self.instance = mock.Mock(return_value=instance)
self.style = mock.Mock(spec=QCommonStyle)
self.style().metaObject().className.return_value = style
self.allWidgets = lambda: all_widgets
self.activeWindow = lambda: active_window
self.arguments = lambda: arguments
self.platformName = lambda: platform_name
class FakeNetworkReply:
"""QNetworkReply stub which provides a Content-Disposition header."""
KNOWN_HEADERS = {
QNetworkRequest.ContentTypeHeader: 'Content-Type',
}
def __init__(self, headers=None, url=None):
if url is None:
url = QUrl()
if headers is None:
self.headers = {}
else:
self.headers = headers
self.url = mock.Mock(return_value=url)
def hasRawHeader(self, name):
"""Check if the reply has a certain header.
Args:
name: The name of the header as ISO-8859-1 encoded bytes object.
Return:
True if the header is present, False if not.
"""
return name.decode('iso-8859-1') in self.headers
def rawHeader(self, name):
"""Get the raw header data of a header.
Args:
name: The name of the header as ISO-8859-1 encoded bytes object.
Return:
The header data, as ISO-8859-1 encoded bytes() object.
"""
name = name.decode('iso-8859-1')
return self.headers[name].encode('iso-8859-1')
def header(self, known_header):
"""Get a known header.
Args:
known_header: A QNetworkRequest::KnownHeaders member.
"""
key = self.KNOWN_HEADERS[known_header]
try:
return self.headers[key]
except KeyError:
return None
def setHeader(self, known_header, value):
"""Set a known header.
Args:
known_header: A QNetworkRequest::KnownHeaders member.
value: The value to set.
"""
key = self.KNOWN_HEADERS[known_header]
self.headers[key] = value
def fake_qprocess():
"""Factory for a QProcess mock which has the QProcess enum values."""
m = mock.Mock(spec=QProcess)
for name in ['NormalExit', 'CrashExit', 'FailedToStart', 'Crashed',
'Timedout', 'WriteError', 'ReadError', 'UnknownError']:
setattr(m, name, getattr(QProcess, name))
return m
class FakeWebTabScroller(browsertab.AbstractScroller):
"""Fake AbstractScroller to use in tests."""
def __init__(self, tab, pos_perc):
super().__init__(tab)
self._pos_perc = pos_perc
def pos_perc(self):
return self._pos_perc
class FakeWebTabHistory(browsertab.AbstractHistory):
"""Fake for Web{Kit,Engine}History."""
def __init__(self, tab, *, can_go_back, can_go_forward):
super().__init__(tab)
self._can_go_back = can_go_back
self._can_go_forward = can_go_forward
def can_go_back(self):
assert self._can_go_back is not None
return self._can_go_back
def can_go_forward(self):
assert self._can_go_forward is not None
return self._can_go_forward
class FakeWebTabAudio(browsertab.AbstractAudio):
def is_muted(self):
return False
def is_recently_audible(self):
return False
class FakeWebTabPrivate(browsertab.AbstractTabPrivate):
def shutdown(self):
pass
class FakeWebTab(browsertab.AbstractTab):
"""Fake AbstractTab to use in tests."""
def __init__(self, url=QUrl(), title='', tab_id=0, *,
scroll_pos_perc=(0, 0),
load_status=usertypes.LoadStatus.success,
progress=0, can_go_back=None, can_go_forward=None):
super().__init__(win_id=0, private=False)
self._load_status = load_status
self._title = title
self._url = url
self._progress = progress
self.history = FakeWebTabHistory(self, can_go_back=can_go_back,
can_go_forward=can_go_forward)
self.scroller = FakeWebTabScroller(self, scroll_pos_perc)
self.audio = FakeWebTabAudio(self)
self.private_api = FakeWebTabPrivate(tab=self, mode_manager=None)
wrapped = QWidget()
self._layout.wrap(self, wrapped)
def url(self, *, requested=False):
assert not requested
return self._url
def title(self):
return self._title
def progress(self):
return self._progress
def load_status(self):
return self._load_status
def icon(self):
return QIcon()
class FakeSignal:
"""Fake pyqtSignal stub which does nothing.
Attributes:
signal: The name of the signal, like pyqtSignal.
_func: The function to be invoked when the signal gets called.
"""
def __init__(self, name='fake', func=None):
self.signal = '2{}(int, int)'.format(name)
self._func = func
def __call__(self):
if self._func is None:
raise TypeError("'FakeSignal' object is not callable")
return self._func()
def connect(self, slot):
"""Connect the signal to a slot.
Currently does nothing, but could be improved to do some sanity
checking on the slot.
"""
def disconnect(self, slot=None):
"""Disconnect the signal from a slot.
Currently does nothing, but could be improved to do some sanity
checking on the slot and see if it actually got connected.
"""
def emit(self, *args):
"""Emit the signal.
Currently does nothing, but could be improved to do type checking based
on a signature given to __init__.
"""
@attr.s(frozen=True)
class FakeCommand:
"""A simple command stub which has a description."""
name = attr.ib('')
desc = attr.ib('')
hide = attr.ib(False)
debug = attr.ib(False)
deprecated = attr.ib(False)
completion = attr.ib(None)
maxsplit = attr.ib(None)
takes_count = attr.ib(lambda: False)
modes = attr.ib((usertypes.KeyMode.normal, ))
class FakeTimer(QObject):
"""Stub for a usertypes.Timer."""
timeout_signal = pyqtSignal()
def __init__(self, parent=None, name=None):
super().__init__(parent)
self.timeout = mock.Mock(spec=['connect', 'disconnect', 'emit'])
self.timeout.connect.side_effect = self.timeout_signal.connect
self.timeout.disconnect.side_effect = self.timeout_signal.disconnect
self.timeout.emit.side_effect = self._emit
self._started = False
self._singleshot = False
self._interval = 0
self._name = name
def __repr__(self):
return '<{} name={!r}>'.format(self.__class__.__name__, self._name)
def _emit(self):
"""Called when the timeout "signal" gets emitted."""
if self._singleshot:
self._started = False
self.timeout_signal.emit()
def setInterval(self, interval):
self._interval = interval
def interval(self):
return self._interval
def setSingleShot(self, singleshot):
self._singleshot = singleshot
def isSingleShot(self):
return self._singleshot
def start(self, interval=None):
if interval:
self._interval = interval
self._started = True
def stop(self):
self._started = False
def isActive(self):
return self._started
class InstaTimer(QObject):
"""Stub for a QTimer that fires instantly on start().
Useful to test a time-based event without inserting an artificial delay.
"""
timeout = pyqtSignal()
def start(self, interval=None):
self.timeout.emit()
def setSingleShot(self, yes):
pass
def setInterval(self, interval):
pass
@staticmethod
def singleShot(_interval, fun):
fun()
class StatusBarCommandStub(QLineEdit):
"""Stub for the statusbar command prompt."""
got_cmd = pyqtSignal(str)
clear_completion_selection = pyqtSignal()
hide_completion = pyqtSignal()
update_completion = pyqtSignal()
show_cmd = pyqtSignal()
hide_cmd = pyqtSignal()
def prefix(self):
return self.text()[0]
class UrlMarkManagerStub(QObject):
"""Stub for the quickmark-manager or bookmark-manager object."""
added = pyqtSignal(str, str)
removed = pyqtSignal(str)
def __init__(self, parent=None):
super().__init__(parent)
self.marks = {}
def delete(self, key):
del self.marks[key]
self.removed.emit(key)
class BookmarkManagerStub(UrlMarkManagerStub):
"""Stub for the bookmark-manager object."""
class QuickmarkManagerStub(UrlMarkManagerStub):
"""Stub for the quickmark-manager object."""
def quickmark_del(self, key):
self.delete(key)
class SessionManagerStub:
"""Stub for the session-manager object."""
def __init__(self):
self.sessions = []
def list_sessions(self):
return self.sessions
def save_autosave(self):
pass
class TabbedBrowserStub(QObject):
"""Stub for the tabbed-browser object."""
def __init__(self, parent=None):
super().__init__(parent)
self.widget = TabWidgetStub()
self.shutting_down = False
self.loaded_url = None
self.cur_url = None
def on_tab_close_requested(self, idx):
del self.widget.tabs[idx]
def widgets(self):
return self.widget.tabs
def tabopen(self, url):
self.loaded_url = url
def load_url(self, url, *, newtab):
self.loaded_url = url
def current_url(self):
        if self.cur_url is None:
raise ValueError("current_url got called with cur_url None!")
return self.cur_url
class TabWidgetStub(QObject):
"""Stub for the tab-widget object."""
new_tab = pyqtSignal(browsertab.AbstractTab, int)
def __init__(self, parent=None):
super().__init__(parent)
self.tabs = []
self._qtabbar = QTabBar()
self.index_of = None
self.current_index = None
def count(self):
return len(self.tabs)
def widget(self, i):
return self.tabs[i]
def page_title(self, i):
return self.tabs[i].title()
def tabBar(self):
return self._qtabbar
def indexOf(self, _tab):
if self.index_of is None:
raise ValueError("indexOf got called with index_of None!")
if self.index_of is RuntimeError:
raise RuntimeError
return self.index_of
def currentIndex(self):
if self.current_index is None:
raise ValueError("currentIndex got called with current_index "
"None!")
return self.current_index
def currentWidget(self):
idx = self.currentIndex()
if idx == -1:
return None
return self.tabs[idx - 1]
class HTTPPostStub(QObject):
"""A stub class for HTTPClient.
Attributes:
url: the last url send by post()
data: the last data send by post()
"""
success = pyqtSignal(str)
error = pyqtSignal(str)
def __init__(self, parent=None):
super().__init__(parent)
self.url = None
self.data = None
def post(self, url, data=None):
self.url = url
self.data = data
class FakeDownloadItem(QObject):
"""Mock browser.downloads.DownloadItem."""
finished = pyqtSignal()
def __init__(self, fileobj, name, parent=None):
super().__init__(parent)
self.fileobj = fileobj
self.name = name
self.successful = False
class FakeDownloadManager:
"""Mock browser.downloads.DownloadManager."""
def __init__(self, tmpdir):
self._tmpdir = tmpdir
self.downloads = []
@contextlib.contextmanager
def _open_fileobj(self, target):
"""Ensure a DownloadTarget's fileobj attribute is available."""
if isinstance(target, downloads.FileDownloadTarget):
target.fileobj = open(target.filename, 'wb')
try:
yield target.fileobj
finally:
target.fileobj.close()
else:
yield target.fileobj
def get(self, url, target, **kwargs):
"""Return a FakeDownloadItem instance with a fileobj.
The content is copied from the file the given url links to.
"""
with self._open_fileobj(target):
download_item = FakeDownloadItem(target.fileobj, name=url.path())
with (self._tmpdir / url.path()).open('rb') as fake_url_file:
shutil.copyfileobj(fake_url_file, download_item.fileobj)
self.downloads.append(download_item)
return download_item
class FakeHistoryProgress:
"""Fake for a WebHistoryProgress object."""
def __init__(self):
self._started = False
self._finished = False
self._value = 0
def start(self, _text, _maximum):
self._started = True
def tick(self):
self._value += 1
def finish(self):
self._finished = True
class FakeCommandRunner(runners.AbstractCommandRunner):
def __init__(self, parent=None):
super().__init__(parent)
self.commands = []
def run(self, text, count=None, *, safely=False):
self.commands.append((text, count))
class FakeHintManager:
def __init__(self):
self.keystr = None
def handle_partial_key(self, keystr):
self.keystr = keystr
| gpl-3.0 | 7,100,110,923,038,181,000 | 25.217125 | 79 | 0.613904 | false |
cloudwatt/contrail-controller | src/config/device-manager/device_manager/physical_router_config.py | 1 | 18986 | #
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains implementation of inetconf interface for physical router
configuration manager
"""
from lxml import etree
from ncclient import manager
import copy
class PhysicalRouterConfig(object):
# mapping from contrail family names to junos
_FAMILY_MAP = {
'route-target': '<route-target/>',
'inet-vpn': '<inet-vpn><unicast/></inet-vpn>',
'inet6-vpn': '<inet6-vpn><unicast/></inet6-vpn>',
'e-vpn': '<evpn><signaling/></evpn>'
}
def __init__(self, management_ip, user_creds, vendor, product, vnc_managed, logger=None):
self.management_ip = management_ip
self.user_creds = user_creds
self.vendor = vendor
self.product = product
self.vnc_managed = vnc_managed
self.reset_bgp_config()
self._logger = logger
self.bgp_config_sent = False
# end __init__
def update(self, management_ip, user_creds, vendor, product, vnc_managed):
self.management_ip = management_ip
self.user_creds = user_creds
self.vendor = vendor
self.product = product
self.vnc_managed = vnc_managed
# end update
def send_netconf(self, new_config, default_operation="merge",
operation="replace"):
if (self.vendor is None or self.product is None or
self.vendor.lower() != "juniper" or self.product.lower() != "mx"):
self._logger.info("auto configuraion of physical router is not supported \
on the configured vendor family, ip: %s, not pushing netconf message" % (self.management_ip))
return
if (self.vnc_managed is None or self.vnc_managed == False):
self._logger.info("vnc managed property must be set for a physical router to get auto \
configured, ip: %s, not pushing netconf message" % (self.management_ip))
return
try:
with manager.connect(host=self.management_ip, port=22,
username=self.user_creds['username'],
password=self.user_creds['password'],
unknown_host_cb=lambda x, y: True) as m:
add_config = etree.Element(
"config",
nsmap={"xc": "urn:ietf:params:xml:ns:netconf:base:1.0"})
config = etree.SubElement(add_config, "configuration")
config_group = etree.SubElement(config, "groups", operation=operation)
contrail_group = etree.SubElement(config_group, "name")
contrail_group.text = "__contrail__"
if isinstance(new_config, list):
for nc in new_config:
config_group.append(nc)
else:
config_group.append(new_config)
apply_groups = etree.SubElement(config, "apply-groups", operation=operation)
apply_groups.text = "__contrail__"
self._logger.info("\nsend netconf message: %s\n" % (etree.tostring(add_config, pretty_print=True)))
m.edit_config(
target='candidate', config=etree.tostring(add_config),
test_option='test-then-set',
default_operation=default_operation)
m.commit()
except Exception as e:
if self._logger:
self._logger.error("Router %s: %s" % (self.management_ip,
e.message))
# end send_config
def add_routing_instance(self, name, import_targets, export_targets,
prefixes=[], gateways=[], router_external=False, interfaces=[], vni=None):
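        """Build the config for one routing instance: VRF (or virtual-switch
        with EVPN/VXLAN when vni is set), import/export policies and, for
        external routers, the redirect firewall filter."""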
self.routing_instances[name] = {'import_targets': import_targets,
'export_targets': export_targets,
'prefixes': prefixes,
'gateways': gateways,
'router_external': router_external,
'interfaces': interfaces,
'vni': vni}
ri_config = self.ri_config or etree.Element("routing-instances")
policy_config = self.policy_config or etree.Element("policy-options")
firewall_config = None
ri = etree.SubElement(ri_config, "instance", operation="replace")
ri_name = "__contrail__" + name.replace(':', '_')
etree.SubElement(ri, "name").text = ri_name
if vni is not None:
etree.SubElement(ri, "instance-type").text = "virtual-switch"
else:
etree.SubElement(ri, "instance-type").text = "vrf"
if vni is None:
for interface in interfaces:
if_element = etree.SubElement(ri, "interface")
etree.SubElement(if_element, "name").text = interface
etree.SubElement(ri, "vrf-import").text = ri_name + "-import"
etree.SubElement(ri, "vrf-export").text = ri_name + "-export"
if vni is None:
etree.SubElement(ri, "vrf-table-label")
ri_opt = None
if prefixes and vni is None:
ri_opt = etree.SubElement(ri, "routing-options")
static_config = etree.SubElement(ri_opt, "static")
for prefix in prefixes:
route_config = etree.SubElement(static_config, "route")
etree.SubElement(route_config, "name").text = prefix
etree.SubElement(route_config, "discard")
auto_export = "<auto-export><family><inet><unicast/></inet></family></auto-export>"
ri_opt.append(etree.fromstring(auto_export))
if router_external and vni is None:
if ri_opt is None:
ri_opt = etree.SubElement(ri, "routing-options")
static_config = etree.SubElement(ri_opt, "static")
route_config = etree.SubElement(static_config, "route")
etree.SubElement(route_config, "name").text = "0.0.0.0/0"
etree.SubElement(route_config, "next-table").text = "inet.0"
# add policies for export route targets
ps = etree.SubElement(policy_config, "policy-statement")
etree.SubElement(ps, "name").text = ri_name + "-export"
term = etree.SubElement(ps, "term")
etree.SubElement(term, "name").text= "t1"
then = etree.SubElement(term, "then")
for route_target in export_targets:
comm = etree.SubElement(then, "community")
etree.SubElement(comm, "add")
etree.SubElement(comm, "community-name").text = route_target.replace(':', '_')
etree.SubElement(then, "accept")
# add policies for import route targets
ps = etree.SubElement(policy_config, "policy-statement")
etree.SubElement(ps, "name").text = ri_name + "-import"
term = etree.SubElement(ps, "term")
etree.SubElement(term, "name").text= "t1"
from_ = etree.SubElement(term, "from")
for route_target in import_targets:
target_name = route_target.replace(':', '_')
etree.SubElement(from_, "community").text = target_name
then = etree.SubElement(term, "then")
etree.SubElement(then, "accept")
then = etree.SubElement(ps, "then")
etree.SubElement(then, "reject")
# add firewall config for public VRF
forwarding_options_config = None
if router_external:
forwarding_options_config = self.forwarding_options_config or etree.Element("forwarding-options")
fo = etree.SubElement(forwarding_options_config, "family")
inet = etree.SubElement(fo, "inet")
f = etree.SubElement(inet, "filter")
etree.SubElement(f, "input").text = "redirect_to_" + ri_name + "_vrf"
firewall_config = self.firewall_config or etree.Element("firewall")
f = etree.SubElement(firewall_config, "filter")
etree.SubElement(f, "name").text = "redirect_to_" + ri_name + "_vrf"
term = etree.SubElement(f, "term")
etree.SubElement(term, "name").text= "t1"
from_ = etree.SubElement(term, "from")
if prefixes:
etree.SubElement(from_, "destination-address").text = ';'.join(prefixes)
then_ = etree.SubElement(term, "then")
etree.SubElement(then_, "routing-instance").text = ri_name
term = etree.SubElement(f, "term")
etree.SubElement(term, "name").text= "t2"
then_ = etree.SubElement(term, "then")
etree.SubElement(then_, "accept")
# add L2 EVPN and BD config
bd_config = None
interfaces_config = None
proto_config = None
if vni is not None and self.is_family_configured(self.bgp_params, "e-vpn"):
etree.SubElement(ri, "vtep-source-interface").text = "lo0.0"
rt_element = etree.SubElement(ri, "vrf-target")
#fix me, check if this is correct target value for vrf-target
for route_target in import_targets:
etree.SubElement(rt_element, "community").text = route_target
bd_config = etree.SubElement(ri, "bridge-domains")
bd= etree.SubElement(bd_config, "domain")
etree.SubElement(bd, "name").text = "bd-" + str(vni)
etree.SubElement(bd, "vlan-id").text = str(vni)
vxlan = etree.SubElement(bd, "vxlan")
etree.SubElement(vxlan, "vni").text = str(vni)
etree.SubElement(vxlan, "ingress-node-replication")
for interface in interfaces:
if_element = etree.SubElement(bd, "interface")
etree.SubElement(if_element, "name").text = interface
etree.SubElement(bd, "routing-interface").text = "irb." + str(vni) #vni is unique, hence irb
evpn_proto_config = etree.SubElement(ri, "protocols")
evpn = etree.SubElement(evpn_proto_config, "evpn")
etree.SubElement(evpn, "encapsulation").text = "vxlan"
etree.SubElement(evpn, "extended-vni-all")
interfaces_config = self.interfaces_config or etree.Element("interfaces")
irb_intf = etree.SubElement(interfaces_config, "interface")
etree.SubElement(irb_intf, "name").text = "irb"
etree.SubElement(irb_intf, "gratuitous-arp-reply")
if gateways is not None:
intf_unit = etree.SubElement(irb_intf, "unit")
etree.SubElement(intf_unit, "name").text = str(vni)
family = etree.SubElement(intf_unit, "family")
inet = etree.SubElement(family, "inet")
for gateway in gateways:
addr = etree.SubElement(inet, "address")
etree.SubElement(addr, "name").text = gateway + "/24"
lo_intf = etree.SubElement(interfaces_config, "interface")
etree.SubElement(lo_intf, "name").text = "lo0"
intf_unit = etree.SubElement(lo_intf, "unit")
etree.SubElement(intf_unit, "name").text = "0"
family = etree.SubElement(intf_unit, "family")
inet = etree.SubElement(family, "inet")
addr = etree.SubElement(inet, "address")
etree.SubElement(addr, "name").text = self.bgp_params['address'] + "/32"
etree.SubElement(addr, "primary")
etree.SubElement(addr, "preferred")
for interface in interfaces:
intf = etree.SubElement(interfaces_config, "interface")
intfparts = interface.split(".")
etree.SubElement(intf, "name").text = intfparts[0]
etree.SubElement(intf, "encapsulation").text = "ethernet-bridge"
intf_unit = etree.SubElement(intf, "unit")
etree.SubElement(intf_unit, "name").text = intfparts[1]
family = etree.SubElement(intf_unit, "family")
etree.SubElement(family, "bridge")
proto_config = self.proto_config or etree.Element("protocols")
mpls = etree.SubElement(proto_config, "mpls")
intf = etree.SubElement(mpls, "interface")
etree.SubElement(intf, "name").text = "all"
self.forwarding_options_config = forwarding_options_config
self.firewall_config = firewall_config
self.policy_config = policy_config
self.proto_config = proto_config
self.interfaces_config = interfaces_config
self.route_targets |= import_targets | export_targets
self.ri_config = ri_config
# end add_routing_instance
def is_family_configured(self, params, family_name):
if params is None or params.get('address_families') is None:
return False
families = params['address_families'].get('family', [])
if family_name in families:
return True
return False
def _add_family_etree(self, parent, params):
if params.get('address_families') is None:
return
family_etree = etree.SubElement(parent, "family")
for family in params['address_families'].get('family', []):
if family in self._FAMILY_MAP:
family_subtree = etree.fromstring(self._FAMILY_MAP[family])
family_etree.append(family_subtree)
else:
etree.SubElement(family_etree, family)
# end _add_family_etree
def set_bgp_config(self, params):
self.bgp_params = params
if (self.vnc_managed is None or self.vnc_managed == False):
if self.bgp_config_sent:
                # user must have unset the vnc managed property, so temporarily set it
# for deleting the existing config
self.vnc_managed = True
self.delete_bgp_config()
self.vnc_managed = False
return
return
# end set_bgp_config
def _get_bgp_config_xml(self, external=False):
if self.bgp_params is None:
return None
bgp_config = etree.Element("group", operation="replace")
if external:
etree.SubElement(bgp_config, "name").text = "__contrail_external__"
etree.SubElement(bgp_config, "type").text = "external"
else:
etree.SubElement(bgp_config, "name").text = "__contrail__"
etree.SubElement(bgp_config, "type").text = "internal"
etree.SubElement(bgp_config, "multihop")
local_address = etree.SubElement(bgp_config, "local-address")
local_address.text = self.bgp_params['address']
self._add_family_etree(bgp_config, self.bgp_params)
etree.SubElement(bgp_config, "keep").text = "all"
return bgp_config
# end _get_bgp_config_xml
def reset_bgp_config(self):
self.routing_instances = {}
self.bgp_params = None
self.ri_config = None
self.interfaces_config = None
self.policy_config = None
self.firewall_config = None
self.forwarding_options_config = None
self.proto_config = None
self.route_targets = set()
self.bgp_peers = {}
self.external_peers = {}
    # end reset_bgp_config
def delete_bgp_config(self):
if not self.bgp_config_sent:
return
self.reset_bgp_config()
self.send_netconf([], default_operation="none", operation="delete")
self.bgp_config_sent = False
# end delete_config
def add_bgp_peer(self, router, params, external):
if external:
self.external_peers[router] = params
else:
self.bgp_peers[router] = params
self.send_bgp_config()
# end add_peer
def delete_bgp_peer(self, router):
if router in self.bgp_peers:
del self.bgp_peers[router]
elif router in self.external_peers:
            del self.external_peers[router]
else:
return
self.send_bgp_config()
# end delete_bgp_peer
def _get_neighbor_config_xml(self, bgp_config, peers):
for peer, params in peers.items():
nbr = etree.SubElement(bgp_config, "neighbor")
etree.SubElement(nbr, "name").text = peer
bgp_sessions = params.get('session')
if bgp_sessions:
# for now assume only one session
session_attrs = bgp_sessions[0].get('attributes', [])
for attr in session_attrs:
# For not, only consider the attribute if bgp-router is
# not specified
if attr.get('bgp_router') is None:
self._add_family_etree(nbr, attr)
break
if params.get('autonomous_system') is not None:
etree.SubElement(nbr, "peer-as").text = str(params.get('autonomous_system'))
# end _get_neighbor_config_xml
def send_bgp_config(self):
bgp_config = self._get_bgp_config_xml()
if bgp_config is None:
return
proto_config = etree.Element("protocols")
bgp = etree.SubElement(proto_config, "bgp")
bgp.append(bgp_config)
self._get_neighbor_config_xml(bgp_config, self.bgp_peers)
if self.external_peers is not None:
ext_grp_config = self._get_bgp_config_xml(True)
bgp.append(ext_grp_config)
self._get_neighbor_config_xml(ext_grp_config, self.external_peers)
routing_options_config = etree.Element("routing-options")
etree.SubElement(
routing_options_config,
"route-distinguisher-id").text = self.bgp_params['identifier']
etree.SubElement(routing_options_config, "autonomous-system").text = \
str(self.bgp_params.get('autonomous_system'))
config_list = [proto_config, routing_options_config]
if self.ri_config is not None:
config_list.append(self.ri_config)
for route_target in self.route_targets:
comm = etree.SubElement(self.policy_config, "community")
etree.SubElement(comm, 'name').text = route_target.replace(':', '_')
etree.SubElement(comm, 'members').text = route_target
if self.interfaces_config is not None:
config_list.append(self.interfaces_config)
if self.policy_config is not None:
config_list.append(self.policy_config)
if self.firewall_config is not None:
config_list.append(self.firewall_config)
if self.forwarding_options_config is not None:
config_list.append(self.forwarding_options_config)
if self.proto_config is not None:
config_list.append(self.proto_config)
self.send_netconf(config_list)
self.bgp_config_sent = True
# end send_bgp_config
# end PhysicalRouterConfig
| apache-2.0 | 3,724,265,717,954,849,300 | 45.879012 | 115 | 0.577952 | false |
FedoraScientific/salome-paravis | test/VisuPrs/CutLines/E8.py | 1 | 1502 | # Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
# This case corresponds to: /visu/CutLines/E8 case
# Create Cut Lines for all data of the given MED file
import sys
from paravistest import datadir, pictureext, get_picture_dir
from presentations import CreatePrsForFile, PrsTypeEnum
import pvserver as paravis
# Create presentations
myParavis = paravis.myParavis
# Directory for saving snapshots
picturedir = get_picture_dir("CutLines/E8")
file = datadir + "KCOUPLEX1.med"
print " --------------------------------- "
print "file ", file
print " --------------------------------- "
print "CreatePrsForFile..."
CreatePrsForFile(myParavis, file, [PrsTypeEnum.CUTLINES], picturedir, pictureext)
| lgpl-2.1 | -1,939,744,444,780,391,400 | 37.512821 | 81 | 0.733023 | false |
sgnn7/sgfc | communication/comms.py | 1 | 2263 | #!/usr/bin/env python2
import time
from devices.zigbee_xbee import XBeeCommDevice
from protobufs import sgfc_pb2 as fc_proto
def test_comms():
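    """Round-trip a GPS FlightMessage between two XBee devices, then time a burst of high-speed sends."""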
dev1 = None
dev2 = None
fc_message = fc_proto.FlightMessage()
fc_message.sender = "Me"
payload = fc_proto.Payload()
payload.type = fc_proto.GPS_POSITION
payload.gps_position.has_fix = False
payload.gps_position.latitude = 1.1111
payload.gps_position.longitude = 22.222
payload.gps_position.altitude = 333.33
payload.gps_position.speed = 4444.4
fc_message.payload.extend([payload])
print(fc_message)
def callback(data):
print("Client got a message!")
proto_message = fc_proto.FlightMessage()
proto_message.ParseFromString(data)
print("Size: %d bytes" % (len(data),))
print('=' * 40)
print(proto_message)
print('=' * 40)
def error_callback(error):
print("Client got error: %s" % (error,))
# TODO: argparse the device
try:
dev1 = XBeeCommDevice('/dev/ttyUSB0', '\x00\x01',
callback=callback,
error_callback=error_callback,
network_id='\xab\xcd')
dev2 = XBeeCommDevice('/dev/ttyUSB1', '\x00\x02',
callback=callback,
error_callback=error_callback,
network_id='\xab\xcd')
print('')
dev2.tx('\x00\x01', fc_message.SerializeToString())
time.sleep(1)
print('')
dev1.tx('\x00\x02', fc_message.SerializeToString())
time.sleep(1)
print('')
print("Testing high-speed transfer")
serialized_message = fc_message.SerializeToString()
start = time.time()
for index in range(100):
dev1.tx('\x00\x02', serialized_message)
dev2.tx('\x00\x02', serialized_message)
end = time.time()
time.sleep(1)
print("Elapsed: %.2fs" % (end - start,))
except Exception as e:
print(e)
print('')
print("Cleaning up")
if dev1:
dev1.close()
if dev2:
dev2.close()
print("Done")
if __name__ == '__main__':
test_comms()
| lgpl-2.1 | 3,578,514,160,517,611,000 | 23.333333 | 60 | 0.549271 | false |
dyve/django-leaflet | setup.py | 1 | 1651 |
import os
from setuptools import setup, find_packages
import sys
here = os.path.abspath(os.path.dirname(__file__))
import codecs
requires = ['Django']
if sys.version_info < (2, 7):
requires += ['ordereddict']
setup(
name='django-leaflet',
version='0.18.1.dev0',
author='Mathieu Leplatre',
author_email='[email protected]',
url='https://github.com/makinacorpus/django-leaflet',
download_url="http://pypi.python.org/pypi/django-leaflet/",
description="Use Leaflet in your django projects",
long_description=codecs.open(
os.path.join(
here, 'README.rst'), 'r', 'utf-8').read() + '\n\n' +
codecs.open(
os.path.join(here, 'CHANGES'),
'r', 'utf-8').read(),
    license='LGPL, see LICENSE file.',
install_requires=requires,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=['Topic :: Utilities',
'Natural Language :: English',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Environment :: Web Environment',
'Framework :: Django',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'],
)
| lgpl-3.0 | -4,729,816,257,460,421,000 | 37.395349 | 68 | 0.522108 | false |
daisychainme/daisychain | daisychain/channel_dropbox/tests/test_models.py | 1 | 1492 | from django.contrib.auth.models import User
from django.test import TestCase
from .models import DropboxAccount, DropboxUser
class TestModelsDropboxAccount(TestCase):
def test_account_str_len(self):
user = User.objects.create_user('John')
dbx_account = DropboxAccount(
user = user,
access_token = 'test_access_token',
cursor = ''
)
dbx_account.save()
string = str(dbx_account)
self.assertEqual(string,
"DropboxAccount belongs to user {}".format(
user))
self.assertEqual(len(DropboxAccount.objects.all()), 1)
class TestModelsDropboxUser(TestCase):
def test_user_str_len(self):
user = User.objects.create_user('John')
dbx_account = DropboxAccount(
user = user,
access_token = '_test_access_token',
cursor = '',
)
dbx_account.save()
dbx_user = DropboxUser(
dropbox_account = dbx_account,
dropbox_userid = 4211,
display_name = "John Doe",
email = "[email protected]",
profile_photo_url = "url.to/the_profile_photo",
disk_used = 4234.234,
disk_allocated = 12345678.4444
)
dbx_user.save()
string = str(dbx_user)
self.assertEqual(string, "Dropbox User #4211 belongs to DropboxAccount {}".format(
dbx_account))
self.assertEqual(len(User.objects.all()), 1)
| mit | 4,904,110,218,897,328,000 | 32.909091 | 90 | 0.577748 | false |
cloudkeep/symantecssl | symantecssl/order.py | 1 | 4443 | from __future__ import absolute_import, division, print_function
import requests
from lxml import etree
from symantecssl.request_models import RequestEnvelope as ReqEnv
class FailedRequest(Exception):
def __init__(self, response):
super(FailedRequest, self).__init__()
self.response = response
def _prepare_request(request_model, credentials):
"""
Prepare the request for execution.
:param request_model: an object with a ``serialize`` method that returns
some LXML Etrees.
:param dict credentials: A dictionary containing the following keys:
- ``partner_code``
- ``username``
- ``password``
:return: a 2-tuple of C{bytes} - the contents of the request and C{dict}
mapping C{bytes} to C{bytes} - the HTTP headers for the request.
"""
request_model.set_credentials(**credentials)
model = ReqEnv(request_model=request_model)
serialized_xml = etree.tostring(model.serialize(), pretty_print=True)
headers = {'Content-Type': 'application/soap+xml'}
return (serialized_xml, headers)
def _parse_response(request_model, response, status_code, response_content):
"""
Parse a response from Symantec.
:param request_model: an object with a ``response_model`` attribute,
representing the request that this response maps to.
:param response: An HTTP response object; used only to instantiate
:obj:`FailedRequest`.
:param int status_code: The HTTP status code of the response.
:param bytes response_content: The bytes of the response.
:return: some LXML DOM nodes.
"""
    # Symantec is not expected to return anything else in the 2xx range; only 200
if status_code != 200:
raise FailedRequest(response)
xml_root = etree.fromstring(response_content)
return request_model.response_model.deserialize(xml_root)
def post_request(endpoint, request_model, credentials):
"""Create a post request against Symantec's SOAPXML API.
Currently supported Request Models are:
GetModifiedOrders
QuickOrderRequest
    note:: the request can take a considerable amount of time if the
    date range covers a large number of changes.
note:: credentials should be a dictionary with the following values:
partner_code
username
password
Access all data from response via models
:param endpoint: Symantec endpoint to hit directly
:param request_model: request model instance to initiate call type
:type request_model: :obj:`symantecssl.request_models.Request`
:param credentials: Symantec specific credentials for orders.
:return response: deserialized response from API
"""
serialized_xml, headers = _prepare_request(request_model, credentials)
response = requests.post(endpoint, serialized_xml, headers=headers)
setattr(response, "model", None)
deserialized = _parse_response(request_model, response,
response.status_code, response.content)
setattr(response, "model", deserialized)
return response
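# A minimal usage sketch (the endpoint URL and credential values below are
# illustrative placeholders; GetModifiedOrders is assumed to live in
# symantecssl.request_models, as listed in the docstring above):
#
#   from symantecssl.request_models import GetModifiedOrders
#   credentials = {'partner_code': '...', 'username': '...', 'password': '...'}
#   response = post_request('https://example.invalid/soap', GetModifiedOrders(),
#                           credentials)
#   deserialized = response.model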
def _after(something):
def decorator(decoratee):
return something.addCallback(decoratee)
return decorator
def post_request_treq(treq, endpoint, request_model, credentials):
"""
Like ``post_request``, but using the Twisted HTTP client in ``treq``.
:param treq: the ``treq`` module to use; either the treq module itself or
an HTTPClient with an added ``.content`` attribute like
``treq.content``.
:param text_type endpoint: the URL of the full Symantec endpoint for either
orders or queries
:param request_model: the request to issue to symantec.
:type request_model: :obj:`symantecssl.request_models.Request`
:return: a Deferred firing with an instance of the appropriate response
model for ``request_model`` looked up via the ``.response_model``
attribute on it, or failing with ``FailedRequest``.
"""
serialized_xml, headers = _prepare_request(request_model, credentials)
@_after(treq.post(endpoint, serialized_xml, headers=headers))
def posted(response):
@_after(treq.content(response))
def content(response_content):
deserialized = _parse_response(request_model, response,
response.code, response_content)
return deserialized
return content
return posted
| apache-2.0 | -4,661,121,772,519,694,000 | 33.984252 | 79 | 0.684898 | false |
Dangerpuss/Dumpster | winDATget/Forget.py | 1 | 1457 | from subprocess import Popen
import os
import csv
srcfile = "/" + input('File Input Name: ')
dirpath = os.path.dirname(__file__)
srcpath = os.path.dirname(__file__) + srcfile
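# The input CSV is expected to contain one "host,user" pair per row
# (row[0] = hostname, row[1] = username); for each pair a per-host batch file is
# generated below that maps \\host\c$, copies the index.dat files and event logs
# into a directory named after the host, and then disconnects the share.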
with open(srcpath, newline='') as f:
reader = csv.reader(f)
for row in reader:
host = (row[0])
user = (row[1])
newpath = os.path.dirname(__file__) + "\\" + host
os.mkdir(newpath)
p = open(newpath + '\{}'.format(host) + '.bat', 'w')
p.write('net use x: \\\{}'.format(host) + '\c$' + '\n')
p.write(r'xcopy /H x:\Users\{}'.format(user) + r'\AppData\Local\Microsoft\Windows\History\History.IE5\index.dat ' + newpath + '\n')
p.write(r'attrib -s -h ' + newpath + '/index.dat' + '\n')
p.write(r'ren ' + newpath + '\index.dat {}'.format(user) +'_History.dat' + '\n')
p.write(r'xcopy /H "x:\Users\{}'.format(user) + r'\AppData\Local\Microsoft\Windows\Temporary Internet Files\Low\Content.IE5\index.dat" ' + newpath + '\n')
p.write(r'attrib -s -h ' + newpath + '\index.dat' + '\n')
p.write(r'ren ' + newpath + '\index.dat {}'.format(user) +'_Temp.dat' + '\n')
p.write(r'xcopy /H x:\Windows\System32\winevt\Logs\Security.evtx ' + newpath + '\n')
p.write(r'xcopy /H x:\Windows\System32\winevt\Logs\System.evtx ' + newpath + '\n')
p.write(r'xcopy /H x:\Windows\System32\winevt\Logs\Application.evtx ' + newpath + '\n')
p.write('net use x: /d')
p.close()
p = Popen(newpath + '\{}'.format(host) + '.bat')
stdout, stderr = p.communicate()
| gpl-2.0 | 2,042,619,565,739,580,000 | 39.472222 | 156 | 0.607412 | false |
BillFoland/daisyluAMR | system/daisylu_system.py | 1 | 13721 |
import os
import sys
import pickle
import pandas as pd
import numpy as np
import hashlib
import os.path
from daisylu_config import *
from daisylu_vectors import *
from sentences import *
from daisylu_output import *
""
def addWikificationToDFrames(sents, sTypes, sentenceAttr):
    # need to split this up into manageable file sizes; the wikifier currently dies with an out-of-memory error
maxPartitionLen=200000
resultsDir = getSystemPath('daisyluPython') + '/wikificationData'
# if the working directory does not exist, create it.
# './wikificationData/input/test0.txt'
if not os.path.exists(resultsDir):
os.makedirs(resultsDir)
if not os.path.exists(resultsDir+'/input'):
os.makedirs(resultsDir+'/input')
if not os.path.exists(resultsDir+'/output'):
os.makedirs(resultsDir+'/output')
for sType in sTypes:
partitions = [ {'textString':'', 'charMapper':{}} ]
for sentIX in range(len(sents[sType])):
if len(partitions[-1]['textString']) > maxPartitionLen:
partitions.append( {'textString':'', 'charMapper':{}})
print 'THERE ARE NOW %d PARTITIONS' % len(partitions)
print '====================================================================================================================='
print '====================================================================================================================='
print
sentence = sents[sType][sentIX]
if not sentIX % 100: print 'addWikificationToDFrames', sType, sentIX
if not hasattr(sentence, sentenceAttr):
continue
sdf = getattr(sentence, sentenceAttr)
if sdf.empty:
continue
sdf['NERForm'] = ''
sdf['NERLabel'] = 'O'
sdf['WKCategory'] = ''
sdf['WKLink'] = ''
sdf['WKLinker'] = np.nan
sdf['WKRanker'] = np.nan
df = sdf[['wordIX','words','txFunc','txBIOES','nameCategory','wiki']].copy()
df['type'] = sType
df['sentIX'] = sentIX
df['allTStart'] = -1
df['allTEnd'] = -1
for i,t in enumerate(sentence.tokens):
startOffset = len(partitions[-1]['textString'])
partitions[-1]['textString'] += t
endOffset = len(partitions[-1]['textString'])
if (any(df.wordIX == i)):
df['allTStart']=startOffset
df['allTEnd']=endOffset
partitions[-1]['charMapper'][startOffset] = (sentIX, i, t)
partitions[-1]['textString'] += ' '
partitions[-1]['textString'] += '\n\n'
allText = ''
for x in partitions:
allText += x['textString']
m = hashlib.md5()
m.update(allText)
md5 = m.hexdigest()
print md5
cacheFn = 'wikificationData/' + md5 + '.pcl'
if not os.path.isfile(cacheFn): # calculate and archive the info, use it later if the same set of sentences is called for
wconfigs = []
wconfigs.append({
'config' : 'configs/STAND_ALONE_NO_INFERENCE.xml',
'inputFn' : resultsDir + '/input/test%d.txt',
'outputDn' : '%s/output/' % resultsDir,
})
info = { 'NER':{}, 'wiki':{} }
# partitions = pickle.load( open( 'wikiPartitions.pcl' ) )
"""
If you're using this system, please cite the paper.
Relational Inference for Wikification
Xiao Cheng and Dan Roth
EMNLP 2013
"""
for p, partition in enumerate(partitions):
for wtype in wconfigs:
tfile = open(wtype['inputFn'] % p, 'wb')
tfile.write(partition['textString'])
tfile.close()
direc = getSystemPath('Wikifier2013')
config = wtype['config']
inputFn = wtype['inputFn'] % p
outputDn = wtype['outputDn']
stencil = '/usr/bin/java -Xmx10G -jar dist/wikifier-3.0-jar-with-dependencies.jar -annotateData %s %s false %s'
cmd = stencil % (inputFn, outputDn, config)
print cmd
errorCode = os.system('cd %s; %s' % (direc, cmd) )
if errorCode:
raise ValueError('ERROR!\n non zero error code %d' % errorCode)
exit(1)
import xmltodict
from bs4 import BeautifulSoup
for p, partition in enumerate(partitions):
print 'Partition %d' % p
charMapper = partitions[p]['charMapper']
html = open('%s/output/' % resultsDir + '/test%d.txt.NER.tagged' % p).read()
parsed_html = BeautifulSoup(html, "lxml")
ner={ 'start':[], 'end':[], 'form':[], 'label':[]}
for item in parsed_html.find_all('start'):
ner['start'].append(int(item.text))
for item in parsed_html.find_all('end'):
ner['end'].append(int(item.text))
for item in parsed_html.find_all('form'):
ner['form'].append(item.text)
for item in parsed_html.find_all('label' ):
ner['label'].append(item.text)
for i in range(len(ner['start'])):
if not i % 100: print 'ner', i
tset = set()
for z in range(ner['start'][i],ner['end'][i]):
if z in charMapper:
tset.add(charMapper[z])
for trip in list(tset):
(six, wix, _) = trip
if not six in info['NER']:
info['NER'][six] = { 'NERForm':{}, 'NERLabel':{} }
info['NER'][six]['NERForm'][wix] = ner['form'][i]
info['NER'][six]['NERLabel'][wix] = ner['label'][i]
with open('%s/output/' % resultsDir + '/test%d.txt.wikification.tagged.full.xml' % p) as fd:
obj = xmltodict.parse(fd.read())
if obj['WikifierOutput']['WikifiedEntities']:
entities = obj['WikifierOutput']['WikifiedEntities']['Entity']
for entity in entities:
#entityText = entity['EntitySurfaceForm']
entityStartOffset = int(entity['EntityTextStart'])
entityEndOffset = int(entity['EntityTextEnd'])
linkerScore = float(entity['LinkerScore'])
rankerScore = float(entity['TopDisambiguation']['RankerScore'])
wikiTitle = entity['TopDisambiguation']['WikiTitle']
attributes = entity['TopDisambiguation']['Attributes']
#print entityText, entityStartOffset, entityEndOffset, textString[entityStartOffset:entityEndOffset]
tset = set()
for z in range(entityStartOffset,entityEndOffset+1):
if z in charMapper:
tset.add(charMapper[z])
for trip in list(tset):
(six, wix, _) = trip
if not six in info['wiki']:
info['wiki'][six] = { 'WKCategory':{}, 'WKLink':{}, 'WKLinker':{}, 'WKRanker':{} }
info['wiki'][six]['WKCategory'][wix] = attributes
info['wiki'][six]['WKLink'][wix] = wikiTitle
info['wiki'][six]['WKLinker'][wix] = linkerScore
info['wiki'][six]['WKRanker'][wix] = rankerScore
pickle.dump( info, open( cacheFn, "wb" ) )
else:
info = pickle.load( open( cacheFn, "rb" ) )
for six in info['NER']:
sentence = sents[sType][six]
if not hasattr(sentence, sentenceAttr):
continue
sdf = getattr(sentence, sentenceAttr)
for wix in info['NER'][six]['NERForm']:
sdf.loc[ (sdf.wordIX == wix), 'NERForm'] = info['NER'][six]['NERForm'][wix]
sdf.loc[ (sdf.wordIX == wix), 'NERLabel'] = info['NER'][six]['NERLabel'][wix]
for six in info['wiki']:
sentence = sents[sType][six]
if not hasattr(sentence, sentenceAttr):
continue
sdf = getattr(sentence, sentenceAttr)
for wix in info['wiki'][six]['WKCategory']:
sdf.loc[ (sdf.wordIX == wix), 'WKCategory'] = info['wiki'][six]['WKCategory'][wix]
sdf.loc[ (sdf.wordIX == wix), 'WKLink'] = info['wiki'][six]['WKLink'][wix]
sdf.loc[ (sdf.wordIX == wix), 'WKLinker'] = info['wiki'][six]['WKLinker'][wix]
sdf.loc[ (sdf.wordIX == wix), 'WKRanker'] = info['wiki'][six]['WKRanker'][wix]
def initializePredictionDataFrames(sents, ixList=None, NEWPrediction=False):
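    # Builds an empty prediction DataFrame for every 'test' sentence (one row
    # per token, txBIOES initialised to 'O'), attaches NER/wikification columns
    # via addWikificationToDFrames, and clips every sentence to 100 tokens.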
if not ixList:
ixList = range(len(sents['test']))
for sentIX in ixList:
sentence = sents['test'][sentIX]
tagList = getSentenceDFTagList()
if NEWPrediction:
tagList += ['NEWPrediction']
df = pd.DataFrame( columns=tagList )
if not (sentIX %1000):
print 'initializing pred frome ', sentIX
df['wordIX'] = range(len(sentence.tokens))
df['sentIX'] = sentIX
df['words'] = sentence.tokens
df['txBIOES'] = 'O'
sentence.predictedDFrame = df
addWikificationToDFrames(sents, ['test'], 'predictedDFrame')
print 'CLIPPING ALL SENTENCES TO LENGTH 100'
for sentIX in ixList:
sentence = sents['test'][sentIX]
sentence.predictedDFrame = sentence.predictedDFrame[sentence.predictedDFrame['wordIX'] < 100]
def createVectorsFromDataFrames(sents, sentenceAttr, dbf, dbtf, systemName, keepSense=True, L0OnlyFromFeaturesDB=False, useDistSG=False ):
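    # Dispatches to the vector-creation routine that matches systemName
    # (AMRL0NoNER, AMRL0, AMRL0Args, AMRL0Nargs, AMRL0Attr or AMRL0Ncat) and
    # writes the vectors into data/<dbtf>, using data/<dbf> as the features DB.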
wordDF = []
dbfn = getSystemPath('daisylu') + 'data/%s' % dbf
dbTestFn = getSystemPath('daisylu') + 'data/%s' % dbtf
merged = mergeSentenceDataFrames(None, ['test'], None, sents=sents, sentenceAttr=sentenceAttr)
if systemName== 'AMRL0NoNER':
createAMRL0Vectors(None, dbTestFn, 100.0, keepSense, sTypes=['test'], vectors=merged, featuresDB=dbfn, maxSents=None, useNER=False)
elif systemName== 'AMRL0':
wordDF = createAMRL0Vectors(None, dbTestFn, 100.0, keepSense, sTypes=['test'], vectors=merged, featuresDB=dbfn, maxSents=None )
elif systemName== 'AMRL0Args':
createAMRL0ArgVectors(None, dbTestFn, 100.0, keepSense, sTypes=['test'], vectors=merged, featuresDB=dbfn, maxSents=None,
L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG )
elif systemName== 'AMRL0Nargs':
createAMRL0NargVectors(None, dbTestFn, 100.0, keepSense, sTypes=['test'], vectors=merged, featuresDB=dbfn, maxSents=None,
L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG )
elif systemName== 'AMRL0Attr':
createAMRL0AttrVectors(None, dbTestFn, 100.0, keepSense, sTypes=['test'], vectors=merged, featuresDB=dbfn, maxSents=None,
L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG )
elif systemName== 'AMRL0Ncat':
createAMRL0NcatVectors(None, dbTestFn, 100.0, keepSense, sTypes=['test'], vectors=merged, featuresDB=dbfn, maxSents=None,
L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG )
else:
        assert False, 'error, invalid system name'
return wordDF
def runKerasNetwork(networkType, vectorDBFn, modelFn, resultsFn, sType='test'):
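    # modelFn packs the model description and its weights as "<model>@<weights>"
    # (see the '@' check in runNetwork below); the forward pass itself is run as
    # an external AMR_NN_Forward.py process inside the NNModels directory.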
direc = getSystemPath( 'NNModels' )
mm, ww = modelFn.split('@')
cmd = getSystemPath( 'python' )
cmd = cmd +' AMR_NN_Forward.py -v %s -m %s -w %s -r %s -s %s' % ('../data/' + vectorDBFn, mm, ww, '../results/' + resultsFn, sType)
print direc, cmd
print direc, cmd
print direc, cmd
errorCode = os.system('cd %s; %s' % (direc, cmd) )
    if errorCode:
        print vectorDBFn, modelFn, resultsFn, sType
        raise ValueError('ERROR!\n non zero error code %d' % errorCode)
def runNetwork(networkType, vectorDBFn, modelFn, resultsFn, sType='test'):
if '@' in modelFn:
runKerasNetwork(networkType, vectorDBFn, modelFn, resultsFn, sType)
else:
        assert False, 'error, Torch networks are no longer supported'
| mit | 6,644,411,702,054,767,000 | 47.487633 | 142 | 0.501494 | false |
novafloss/django-formidable | formidable/forms/__init__.py | 1 | 7650 | # -*- coding: utf-8 -*-
"""
This module exposes everything needed to generate a standard django form class
from a formidable object.
Given a formidable object, you can use :func:`get_dynamic_form_class` to get
its corresponding django form class.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from django import forms
from django.db.models import Prefetch
from formidable.forms import field_builder
from formidable.forms.conditions import conditions_register
from formidable.models import Access, Formidable, Item
class FormidableBoundFieldCache(dict):
"""
In Django 1.8, bound fields are handled in the form context (__getitem__).
However, we want to inject our own BoundField for FormatField in order to
handle labels differently.
This can be achieved by implementing the get_bound_field method in our
field (available in Django >= 1.9). For now, if the method exists,
the bound_field is switched-in at the form level.
"""
def __setitem__(self, key, bf):
form, field, name = bf.form, bf.field, bf.name
if hasattr(field, 'get_bound_field'):
bf = field.get_bound_field(form, name)
return super(FormidableBoundFieldCache, self).__setitem__(key, bf)
class BaseDynamicForm(forms.Form):
"""
This class is used to generate the final Django form class corresponding to
the formidable object.
Please do not use this class directly, rather, you should check the
endpoint :func:`get_dynamic_form_class`
"""
def __init__(self, *args, **kwargs):
super(BaseDynamicForm, self).__init__(*args, **kwargs)
self._bound_fields_cache = FormidableBoundFieldCache()
def get_removed_fields(self, cleaned_data):
"""
Build the list of fields to be removed due to conditional displays
"""
# build a catalog of fields **targeted** by the conditions
condition_targets = {}
# For each condition, extract its status (should I display or not)
for condition in self._conditions:
# should we keep these fields?
keep_fields = condition.keep_fields(cleaned_data)
for field_id in condition.fields_ids:
# Fill the catalog
if field_id not in condition_targets:
condition_targets[field_id] = []
condition_targets[field_id].append(keep_fields)
# Here, the catalog contains fields targeted by 1 or many conditions.
# If only one condition says "please display X", we'll keep X
# That's why we gather the conditions using "any"
condition_targets = {k: any(v) for k, v in condition_targets.items()}
# We'll only remove fields that are targeted by conditions **and**
# those conditions are false
return (k for k, v in condition_targets.items() if not v)
def clean(self):
cleaned_data = super(BaseDynamicForm, self).clean()
removed_fields = self.get_removed_fields(cleaned_data)
for field_id in removed_fields:
# Remove field from cleaned_data
cleaned_data.pop(field_id, None)
# Remove from eventual existing errors
self.errors.pop(field_id, None)
# The field might have been removed if it was a file field.
if field_id in self.fields:
del self.fields[field_id]
return cleaned_data
def get_dynamic_form_class_from_schema(schema, field_factory=None):
"""
Return a dynamically generated and contextualized form class
"""
attrs = OrderedDict()
field_factory = field_factory or field_builder.FormFieldFactory()
doc = schema['description']
for field in schema['fields']:
try:
form_field = field_factory.produce(field)
except field_builder.SkipField:
pass
else:
attrs[field['slug']] = form_field
conditions = schema.get('conditions', None) or []
attrs['_conditions'] = conditions_register.build(
attrs,
conditions
)
form_class = type(str('DynamicForm'), (BaseDynamicForm,), attrs)
form_class.__doc__ = doc
return form_class
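# A minimal schema sketch accepted by get_dynamic_form_class_from_schema (the
# field payload shown is illustrative; the exact keys of each field depend on
# the registered field builders):
#
#   schema = {
#       'description': 'Example form',
#       'fields': [{'slug': 'first_name', 'type_id': 'text', 'label': 'First name'}],
#       'conditions': [],
#   }
#   DynamicForm = get_dynamic_form_class_from_schema(schema)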
def get_dynamic_form_class(formidable, role=None, field_factory=None):
"""
This is the main method for getting a django form class from a formidable
object.
.. code-block:: python
form_obj = Formidable.objects.get(pk=42)
django_form_class = get_dynamic_form_class(form_obj)
    The optional ``role`` argument provides a way to get the form class
    according to the access rights you specify by role. The ``role`` must
be a role id, as defined by the code pointed to in
settings.FORMIDABLE_ACCESS_RIGHTS_LOADER.
.. code-block:: python
form_obj = Formidable.objects.get(pk=42)
django_form_class = get_dynamic_form_class(form_obj, role='jedi')
"""
attrs = OrderedDict()
field_factory = field_factory or field_builder.FormFieldFactory()
access_qs = Access.objects.all()
if role:
access_qs = access_qs.filter(access_id=role)
fields = formidable.fields.prefetch_related(
Prefetch('items', queryset=Item.objects.order_by('order')),
Prefetch('accesses', queryset=access_qs),
'validations', 'defaults'
)
for field in fields.order_by('order').all():
try:
form_field = field_factory.produce(field, role)
except field_builder.SkipField:
pass
else:
attrs[field.slug] = form_field
conditions_json = formidable.conditions or []
attrs['_conditions'] = conditions_register.build(attrs, conditions_json)
return type(str('DynamicForm'), (BaseDynamicForm,), attrs)
class FormidableForm(forms.Form):
"""
This is the main class available to build a formidable object with Django's
form API syntax.
It provides a class method :meth:`to_formidable` which saves the declared
    form as a formidable object.
Check the formidable.forms.fields module to see what fields are available
when defining your form.
"""
@classmethod
def to_formidable(cls, label=None, description=None, instance=None):
if not instance:
if not label:
raise ValueError("Label is required on creation mode")
description = description or ''
form = Formidable.objects.create(
label=label, description=description
)
else:
form = cls.get_clean_form(instance, label, description)
order = 0
for slug, field in cls.declared_fields.items():
field.to_formidable(form, order, slug)
order += 1
return form
@classmethod
def get_clean_form(cls, form, label, description):
"""
        From a form definition and label and description values, the method
        cleans all fields and validations attached to the form.
        If the label or description are not empty, those values are updated
        in the database *and* in memory.
        The returned object is a form without fields or validations, with the
        new label and description applied if needed.
"""
form.fields.all().delete()
if description or label:
kwargs = {
'description': description or form.description,
'label': label or form.label,
}
Formidable.objects.filter(pk=form.pk).update(**kwargs)
form.label = kwargs['label']
form.description = kwargs['description']
return form
| mit | -4,146,912,984,624,898,000 | 34.091743 | 79 | 0.642484 | false |
zenefits/sentry | src/sentry/api/urls.py | 1 | 23038 | from __future__ import absolute_import, print_function
from django.conf.urls import include, patterns, url
from .endpoints.api_tokens import ApiTokensEndpoint
from .endpoints.auth_index import AuthIndexEndpoint
from .endpoints.broadcast_index import BroadcastIndexEndpoint
from .endpoints.catchall import CatchallEndpoint
from .endpoints.event_details import EventDetailsEndpoint
from .endpoints.group_details import GroupDetailsEndpoint
from .endpoints.group_environment_details import GroupEnvironmentDetailsEndpoint
from .endpoints.group_events import GroupEventsEndpoint
from .endpoints.group_events_latest import GroupEventsLatestEndpoint
from .endpoints.group_events_oldest import GroupEventsOldestEndpoint
from .endpoints.group_hashes import GroupHashesEndpoint
from .endpoints.group_notes import GroupNotesEndpoint
from .endpoints.group_notes_details import GroupNotesDetailsEndpoint
from .endpoints.group_participants import GroupParticipantsEndpoint
from .endpoints.group_stats import GroupStatsEndpoint
from .endpoints.group_tags import GroupTagsEndpoint
from .endpoints.group_tagkey_details import GroupTagKeyDetailsEndpoint
from .endpoints.group_tagkey_values import GroupTagKeyValuesEndpoint
from .endpoints.group_user_reports import GroupUserReportsEndpoint
from .endpoints.index import IndexEndpoint
from .endpoints.internal_stats import InternalStatsEndpoint
from .endpoints.legacy_project_redirect import LegacyProjectRedirectEndpoint
from .endpoints.organization_access_request_details import OrganizationAccessRequestDetailsEndpoint
from .endpoints.organization_activity import OrganizationActivityEndpoint
from .endpoints.organization_auditlogs import OrganizationAuditLogsEndpoint
from .endpoints.organization_details import OrganizationDetailsEndpoint
from .endpoints.organization_shortid import ShortIdLookupEndpoint
from .endpoints.organization_slugs import SlugsUpdateEndpoint
from .endpoints.organization_issues_new import OrganizationIssuesNewEndpoint
from .endpoints.organization_member_details import OrganizationMemberDetailsEndpoint
from .endpoints.organization_member_index import OrganizationMemberIndexEndpoint
from .endpoints.organization_member_issues_assigned import OrganizationMemberIssuesAssignedEndpoint
from .endpoints.organization_member_issues_bookmarked import OrganizationMemberIssuesBookmarkedEndpoint
from .endpoints.organization_member_issues_viewed import OrganizationMemberIssuesViewedEndpoint
from .endpoints.organization_member_team_details import OrganizationMemberTeamDetailsEndpoint
from .endpoints.organization_onboarding_tasks import OrganizationOnboardingTaskEndpoint
from .endpoints.organization_index import OrganizationIndexEndpoint
from .endpoints.organization_projects import OrganizationProjectsEndpoint
from .endpoints.organization_repositories import OrganizationRepositoriesEndpoint
from .endpoints.organization_config_repositories import OrganizationConfigRepositoriesEndpoint
from .endpoints.organization_repository_commits import OrganizationRepositoryCommitsEndpoint
from .endpoints.organization_repository_details import OrganizationRepositoryDetailsEndpoint
from .endpoints.organization_stats import OrganizationStatsEndpoint
from .endpoints.organization_teams import OrganizationTeamsEndpoint
from .endpoints.organization_user_issues_search import OrganizationUserIssuesSearchEndpoint
from .endpoints.project_details import ProjectDetailsEndpoint
from .endpoints.project_docs import ProjectDocsEndpoint
from .endpoints.project_docs_platform import ProjectDocsPlatformEndpoint
from .endpoints.project_environments import ProjectEnvironmentsEndpoint
from .endpoints.project_events import ProjectEventsEndpoint
from .endpoints.project_event_details import ProjectEventDetailsEndpoint
from .endpoints.project_filters import ProjectFiltersEndpoint
from .endpoints.project_filter_details import ProjectFilterDetailsEndpoint
from .endpoints.project_group_index import ProjectGroupIndexEndpoint
from .endpoints.project_group_stats import ProjectGroupStatsEndpoint
from .endpoints.project_index import ProjectIndexEndpoint
from .endpoints.project_keys import ProjectKeysEndpoint
from .endpoints.project_key_details import ProjectKeyDetailsEndpoint
from .endpoints.project_member_index import ProjectMemberIndexEndpoint
from .endpoints.project_plugin_details import ProjectPluginDetailsEndpoint
from .endpoints.project_releases import ProjectReleasesEndpoint
from .endpoints.project_rules import ProjectRulesEndpoint
from .endpoints.project_rule_details import ProjectRuleDetailsEndpoint
from .endpoints.project_searches import ProjectSearchesEndpoint
from .endpoints.project_search_details import ProjectSearchDetailsEndpoint
from .endpoints.project_stats import ProjectStatsEndpoint
from .endpoints.project_tags import ProjectTagsEndpoint
from .endpoints.project_tagkey_details import ProjectTagKeyDetailsEndpoint
from .endpoints.project_tagkey_values import ProjectTagKeyValuesEndpoint
from .endpoints.project_users import ProjectUsersEndpoint
from .endpoints.project_user_reports import ProjectUserReportsEndpoint
from .endpoints.release_commits import ReleaseCommitsEndpoint
from .endpoints.release_details import ReleaseDetailsEndpoint
from .endpoints.release_files import ReleaseFilesEndpoint
from .endpoints.release_file_details import ReleaseFileDetailsEndpoint
from .endpoints.dsym_files import DSymFilesEndpoint, GlobalDSymFilesEndpoint, \
UnknownDSymFilesEndpoint, UnknownGlobalDSymFilesEndpoint
from .endpoints.shared_group_details import SharedGroupDetailsEndpoint
from .endpoints.system_health import SystemHealthEndpoint
from .endpoints.system_options import SystemOptionsEndpoint
from .endpoints.team_details import TeamDetailsEndpoint
from .endpoints.team_groups_new import TeamGroupsNewEndpoint
from .endpoints.team_groups_trending import TeamGroupsTrendingEndpoint
from .endpoints.team_members import TeamMembersEndpoint
from .endpoints.team_project_index import TeamProjectIndexEndpoint
from .endpoints.team_stats import TeamStatsEndpoint
from .endpoints.useravatar import UserAvatarEndpoint
from .endpoints.user_authenticator_details import UserAuthenticatorDetailsEndpoint
from .endpoints.user_identity_details import UserIdentityDetailsEndpoint
from .endpoints.user_index import UserIndexEndpoint
from .endpoints.user_details import UserDetailsEndpoint
from .endpoints.user_organizations import UserOrganizationsEndpoint
urlpatterns = patterns(
'',
# Api Tokens
url(r'^api-tokens/$',
ApiTokensEndpoint.as_view(),
name='sentry-api-0-api-tokens'),
# Auth
url(r'^auth/$',
AuthIndexEndpoint.as_view(),
name='sentry-api-0-auth'),
# Broadcasts
url(r'^broadcasts/$',
BroadcastIndexEndpoint.as_view(),
name='sentry-api-0-broadcast-index'),
# Users
url(r'^users/$',
UserIndexEndpoint.as_view(),
name='sentry-api-0-user-index'),
url(r'^users/(?P<user_id>[^\/]+)/$',
UserDetailsEndpoint.as_view(),
name='sentry-api-0-user-details'),
url(r'^users/(?P<user_id>[^\/]+)/avatar/$',
UserAvatarEndpoint.as_view(),
name='sentry-api-0-user-avatar'),
url(r'^users/(?P<user_id>[^\/]+)/authenticators/(?P<auth_id>[^\/]+)/$',
UserAuthenticatorDetailsEndpoint.as_view(),
name='sentry-api-0-user-authenticator-details'),
url(r'^users/(?P<user_id>[^\/]+)/identities/(?P<identity_id>[^\/]+)/$',
UserIdentityDetailsEndpoint.as_view(),
name='sentry-api-0-user-identity-details'),
url(r'^users/(?P<user_id>[^\/]+)/organizations/$',
UserOrganizationsEndpoint.as_view(),
name='sentry-api-0-user-organizations'),
# Organizations
url(r'^organizations/$',
OrganizationIndexEndpoint.as_view(),
name='sentry-api-0-organizations'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/$',
OrganizationDetailsEndpoint.as_view(),
name='sentry-api-0-organization-details'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/shortids/(?P<short_id>[^\/]+)/$',
ShortIdLookupEndpoint.as_view(),
name='sentry-api-0-short-id-lookup'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/slugs/$',
SlugsUpdateEndpoint.as_view(),
name='sentry-api-0-short-ids-update'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/access-requests/(?P<request_id>\d+)/$',
OrganizationAccessRequestDetailsEndpoint.as_view(),
name='sentry-api-0-organization-access-request-details'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/activity/$',
OrganizationActivityEndpoint.as_view(),
name='sentry-api-0-organization-activity'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/audit-logs/$',
OrganizationAuditLogsEndpoint.as_view(),
name='sentry-api-0-organization-audit-logs'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/config/repos/$',
OrganizationConfigRepositoriesEndpoint.as_view(),
name='sentry-api-0-organization-config-repositories'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/issues/new/$',
OrganizationIssuesNewEndpoint.as_view(),
name='sentry-api-0-organization-issues-new'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/members/$',
OrganizationMemberIndexEndpoint.as_view(),
name='sentry-api-0-organization-member-index'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/users/issues/$',
OrganizationUserIssuesSearchEndpoint.as_view(),
name='sentry-api-0-organization-issue-search'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/$',
OrganizationMemberDetailsEndpoint.as_view(),
name='sentry-api-0-organization-member-details'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/issues/assigned/$',
OrganizationMemberIssuesAssignedEndpoint.as_view(),
name='sentry-api-0-organization-member-issues-assigned'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/issues/bookmarked/$',
OrganizationMemberIssuesBookmarkedEndpoint.as_view(),
name='sentry-api-0-organization-member-issues-bookmarked'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/issues/viewed/$',
OrganizationMemberIssuesViewedEndpoint.as_view(),
name='sentry-api-0-organization-member-issues-viewed'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/teams/(?P<team_slug>[^\/]+)/$',
OrganizationMemberTeamDetailsEndpoint.as_view(),
name='sentry-api-0-organization-member-team-details'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/projects/$',
OrganizationProjectsEndpoint.as_view(),
name='sentry-api-0-organization-projects'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/repos/$',
OrganizationRepositoriesEndpoint.as_view(),
name='sentry-api-0-organization-repositories'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/repos/(?P<repo_id>[^\/]+)/$',
OrganizationRepositoryDetailsEndpoint.as_view(),
name='sentry-api-0-organization-repository-details'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/repos/(?P<repo_id>[^\/]+)/commits/$',
OrganizationRepositoryCommitsEndpoint.as_view(),
name='sentry-api-0-organization-repository-commits'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/stats/$',
OrganizationStatsEndpoint.as_view(),
name='sentry-api-0-organization-stats'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/teams/$',
OrganizationTeamsEndpoint.as_view(),
name='sentry-api-0-organization-teams'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/onboarding-tasks/$',
OrganizationOnboardingTaskEndpoint.as_view(),
name='sentry-api-0-organization-onboardingtasks'),
# Teams
url(r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/$',
TeamDetailsEndpoint.as_view(),
name='sentry-api-0-team-details'),
url(r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/(?:issues|groups)/new/$',
TeamGroupsNewEndpoint.as_view(),
name='sentry-api-0-team-groups-new'),
url(r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/(?:issues|groups)/trending/$',
TeamGroupsTrendingEndpoint.as_view(),
name='sentry-api-0-team-groups-trending'),
url(r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/members/$',
TeamMembersEndpoint.as_view(),
name='sentry-api-0-team-members'),
url(r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/projects/$',
TeamProjectIndexEndpoint.as_view(),
name='sentry-api-0-team-project-index'),
url(r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/stats/$',
TeamStatsEndpoint.as_view(),
name='sentry-api-0-team-stats'),
# Handles redirecting project_id => org_slug/project_slug
# TODO(dcramer): remove this after a reasonable period of time
url(r'^projects/(?P<project_id>\d+)/(?P<path>(?:groups|releases|stats|tags)/.*)$',
LegacyProjectRedirectEndpoint.as_view()),
# Projects
url(r'^projects/$',
ProjectIndexEndpoint.as_view(),
name='sentry-api-0-projects'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/$',
ProjectDetailsEndpoint.as_view(),
name='sentry-api-0-project-details'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/docs/$',
ProjectDocsEndpoint.as_view(),
name='sentry-api-0-project-docs'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/docs/(?P<platform>[\w-]+)/$',
ProjectDocsPlatformEndpoint.as_view(),
name='sentry-api-0-project-docs-platform'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/environments/$',
ProjectEnvironmentsEndpoint.as_view(),
name='sentry-api-0-project-environments'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/$',
ProjectEventsEndpoint.as_view(),
name='sentry-api-0-project-events'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/$',
ProjectEventDetailsEndpoint.as_view(),
name='sentry-api-0-project-event-details'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/filters/$',
ProjectFiltersEndpoint.as_view(),
name='sentry-api-0-project-filters'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/filters/(?P<filter_id>[\w-]+)/$',
ProjectFilterDetailsEndpoint.as_view(),
name='sentry-api-0-project-filters'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/(?:issues|groups)/$',
ProjectGroupIndexEndpoint.as_view(),
name='sentry-api-0-project-group-index'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/(?:issues|groups)/stats/$',
ProjectGroupStatsEndpoint.as_view(),
name='sentry-api-0-project-group-stats'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/keys/$',
ProjectKeysEndpoint.as_view(),
name='sentry-api-0-project-keys'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/keys/(?P<key_id>[^\/]+)/$',
ProjectKeyDetailsEndpoint.as_view(),
name='sentry-api-0-project-key-details'),
url(r'^projects/(?P<organization_slug>[^/]+)/(?P<project_slug>[^/]+)/members/$',
ProjectMemberIndexEndpoint.as_view(),
name='sentry-api-0-project-member-index'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/$',
ProjectReleasesEndpoint.as_view(),
name='sentry-api-0-project-releases'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/$',
ReleaseDetailsEndpoint.as_view(),
name='sentry-api-0-release-details'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/commits/$',
ReleaseCommitsEndpoint.as_view(),
name='sentry-api-0-release-commits'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/$',
ReleaseFilesEndpoint.as_view(),
name='sentry-api-0-release-files'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/(?P<file_id>\d+)/$',
ReleaseFileDetailsEndpoint.as_view(),
name='sentry-api-0-release-file-details'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/files/dsyms/$',
DSymFilesEndpoint.as_view(),
name='sentry-api-0-dsym-files'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/files/dsyms/unknown/$',
UnknownDSymFilesEndpoint.as_view(),
name='sentry-api-0-unknown-dsym-files'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/rules/$',
ProjectRulesEndpoint.as_view(),
name='sentry-api-0-project-rules'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/rules/(?P<rule_id>[^\/]+)/$',
ProjectRuleDetailsEndpoint.as_view(),
name='sentry-api-0-project-rule-details'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/searches/$',
ProjectSearchesEndpoint.as_view(),
name='sentry-api-0-project-searches'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/searches/(?P<search_id>[^\/]+)/$',
ProjectSearchDetailsEndpoint.as_view(),
name='sentry-api-0-project-search-details'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/stats/$',
ProjectStatsEndpoint.as_view(),
name='sentry-api-0-project-stats'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tags/$',
ProjectTagsEndpoint.as_view(),
name='sentry-api-0-project-tags'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tags/(?P<key>[^/]+)/$',
ProjectTagKeyDetailsEndpoint.as_view(),
name='sentry-api-0-project-tagkey-details'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tags/(?P<key>[^/]+)/values/$',
ProjectTagKeyValuesEndpoint.as_view(),
name='sentry-api-0-project-tagkey-values'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/users/$',
ProjectUsersEndpoint.as_view(),
name='sentry-api-0-project-users'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/(?:user-feedback|user-reports)/$',
ProjectUserReportsEndpoint.as_view(),
name='sentry-api-0-project-user-reports'),
# Load plugin project urls
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/plugins/(?P<plugin_id>[^\/]+)/$',
ProjectPluginDetailsEndpoint.as_view(),
name='sentry-api-0-project-plugin-details'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/plugins?/',
include('sentry.plugins.base.project_api_urls')),
# Groups
url(r'^(?:issues|groups)/(?P<issue_id>\d+)/$',
GroupDetailsEndpoint.as_view(),
name='sentry-api-0-group-details'),
url(r'^(?:issues|groups)/(?P<issue_id>\d+)/events/$',
GroupEventsEndpoint.as_view(),
name='sentry-api-0-group-events'),
url(r'^(?:issues|groups)/(?P<issue_id>\d+)/events/latest/$',
GroupEventsLatestEndpoint.as_view(),
name='sentry-api-0-group-events-latest'),
url(r'^(?:issues|groups)/(?P<issue_id>\d+)/events/oldest/$',
GroupEventsOldestEndpoint.as_view(),
name='sentry-api-0-group-events-oldest'),
url(r'^(?:issues|groups)/(?P<issue_id>\d+)/(?:notes|comments)/$',
GroupNotesEndpoint.as_view(),
name='sentry-api-0-group-notes'),
url(r'^(?:issues|groups)/(?P<issue_id>\d+)/(?:notes|comments)/(?P<note_id>[^\/]+)/$',
GroupNotesDetailsEndpoint.as_view(),
name='sentry-api-0-group-notes-details'),
url(r'^(?:issues|groups)/(?P<issue_id>\d+)/hashes/$',
GroupHashesEndpoint.as_view(),
name='sentry-api-0-group-events'),
url(r'^issues/(?P<issue_id>\d+)/participants/$',
GroupParticipantsEndpoint.as_view(),
name='sentry-api-0-group-stats'),
url(r'^(?:issues|groups)/(?P<issue_id>\d+)/stats/$',
GroupStatsEndpoint.as_view(),
name='sentry-api-0-group-stats'),
url(r'^(?:issues|groups)/(?P<issue_id>\d+)/environments/(?P<environment>[^/]+)/$',
GroupEnvironmentDetailsEndpoint.as_view(),
name='sentry-api-0-group-environment-details'),
url(r'^(?:issues|groups)/(?P<issue_id>\d+)/tags/$',
GroupTagsEndpoint.as_view(),
name='sentry-api-0-group-tags'),
url(r'^(?:issues|groups)/(?P<issue_id>\d+)/tags/(?P<key>[^/]+)/$',
GroupTagKeyDetailsEndpoint.as_view(),
name='sentry-api-0-group-tagkey-details'),
url(r'^(?:issues|groups)/(?P<issue_id>\d+)/tags/(?P<key>[^/]+)/values/$',
GroupTagKeyValuesEndpoint.as_view(),
name='sentry-api-0-group-tagkey-values'),
url(r'^(?:issues|groups)/(?P<issue_id>\d+)/(?:user-feedback|user-reports)/$',
GroupUserReportsEndpoint.as_view(),
name='sentry-api-0-group-user-reports'),
# Load plugin group urls
url(r'^(?:issues|groups)/(?P<issue_id>\d+)/plugins?/',
include('sentry.plugins.base.group_api_urls')),
url(r'^shared/(?:issues|groups)/(?P<share_id>[^\/]+)/$',
SharedGroupDetailsEndpoint.as_view(),
name='sentry-api-0-shared-group-details'),
# Events
url(r'^events/(?P<event_id>\d+)/$',
EventDetailsEndpoint.as_view(),
name='sentry-api-0-event-details'),
# Installation Global Endpoints
url(r'^system/global-dsyms/$',
GlobalDSymFilesEndpoint.as_view(),
name='sentry-api-0-global-dsym-files'),
url(r'^system/global-dsyms/unknown/$',
UnknownGlobalDSymFilesEndpoint.as_view(),
name='sentry-api-0-unknown-global-dsym-files'),
# Internal
url(r'^internal/health/$',
SystemHealthEndpoint.as_view(),
name='sentry-api-0-system-health'),
url(r'^internal/options/$',
SystemOptionsEndpoint.as_view(),
name='sentry-api-0-system-options'),
url(r'^internal/stats/$',
InternalStatsEndpoint.as_view(),
name='sentry-api-0-internal-stats'),
url(r'^$',
IndexEndpoint.as_view(),
name='sentry-api-index'),
url(r'^',
CatchallEndpoint.as_view(),
name='sentry-api-catchall'),
# url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
)
| bsd-3-clause | -7,951,625,677,568,653,000 | 53.852381 | 129 | 0.685433 | false |
SleepyDeveloper/alexa-cookbook | tools/TestFlow/sampleskill3/index.py | 2 | 7822 | """
This sample demonstrates a simple skill built with the Amazon Alexa Skills Kit.
The Intent Schema, Custom Slots, and Sample Utterances for this skill, as well
as testing instructions are located at http://amzn.to/1LzFrj6
For additional samples, visit the Alexa Skills Kit Getting Started guide at
http://amzn.to/1LGWsLG
"""
from __future__ import print_function
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': "SessionSpeechlet - " + title,
'content': "SessionSpeechlet - " + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = {}
card_title = "Welcome"
speech_output = "Welcome to the Alexa Skills Kit sample. " \
"Please tell me your favorite color by saying, " \
"my favorite color is red"
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "Please tell me your favorite color by saying, " \
"my favorite color is red."
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you for trying the Alexa Skills Kit sample. " \
"Have a nice day! "
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
def create_favorite_color_attributes(favorite_color):
return {"favoriteColor": favorite_color}
def set_color_in_session(intent, session):
""" Sets the color in the session and prepares the speech to reply to the
user.
"""
card_title = intent['name']
session_attributes = {}
should_end_session = False
if 'Color' in intent['slots']:
favorite_color = intent['slots']['Color']['value']
session_attributes = create_favorite_color_attributes(favorite_color)
speech_output = "I now know your favorite color is " + \
favorite_color + \
". You can ask me your favorite color by saying, " \
"what's my favorite color?"
reprompt_text = "You can ask me your favorite color by saying, " \
"what's my favorite color?"
else:
speech_output = "I'm not sure what your favorite color is. " \
"Please try again."
reprompt_text = "I'm not sure what your favorite color is. " \
"You can tell me your favorite color by saying, " \
"my favorite color is red."
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def get_color_from_session(intent, session):
session_attributes = {}
reprompt_text = None
if session.get('attributes', {}) and "favoriteColor" in session.get('attributes', {}):
favorite_color = session['attributes']['favoriteColor']
speech_output = "Your favorite color is " + favorite_color + \
". Goodbye."
should_end_session = True
else:
speech_output = "I'm not sure what your favorite color is. " \
"You can say, my favorite color is red."
should_end_session = False
# Setting reprompt_text to None signifies that we do not want to reprompt
# the user. If the user does not respond or says something that is not
# understood, the session will end.
return build_response(session_attributes, build_speechlet_response(
intent['name'], speech_output, reprompt_text, should_end_session))
# --------------- Events ------------------
def on_session_started(session_started_request, session):
""" Called when the session starts """
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
# print("on_launch requestId=" + launch_request['requestId'] + ", sessionId=" + session['sessionId'])
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
# print("on_intent requestId=" + intent_request['requestId'] + ", sessionId=" + session['sessionId'])
# print("print comment from intent ", intent_request['intent']['name'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == "MyColorIsIntent":
return set_color_in_session(intent, session)
elif intent_name == "WhatsMyColorIntent":
return get_color_from_session(intent, session)
elif intent_name == "AMAZON.HelpIntent":
return get_welcome_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
else:
raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# add cleanup logic here
# --------------- Main handler ------------------
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
# print("event.session.application.applicationId=" + event['session']['application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
# if (event['session']['application']['applicationId'] !=
# "amzn1.echo-sdk-ams.app.[unique-value-here]"):
# raise ValueError("Invalid Application ID")
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session'])
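# A minimal local-invocation sketch (the IDs below are placeholders; a real
# Alexa request also carries an applicationId matching the deployed skill):
#
#   if __name__ == "__main__":
#       event = {
#           'session': {'new': True, 'sessionId': 'session-1',
#                       'application': {'applicationId': 'amzn1.echo-sdk-ams.app.example'},
#                       'attributes': {}},
#           'request': {'type': 'LaunchRequest', 'requestId': 'request-1'},
#       }
#       print(lambda_handler(event, None))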
| apache-2.0 | 4,030,710,574,117,284,000 | 37.156098 | 106 | 0.623114 | false |
MG-group-tools/MGFunc | mgfunc_v2/swiss2tab.py | 1 | 7278 | from __future__ import division
import argparse
from Bio import SeqIO
from datetime import datetime as dt
import time
import os
import sys
import gzip
class main:
def __init__(self):
self.start = time.time()
self.d_ = dt.today()
self.timestarted = self.d_.strftime("%d-%m-%Y %H:%M:%S")
self.parseArgs()
def parseArgs(self):###GETTING ARGUMENTS FROM COMMANDLINE###
        parser = argparse.ArgumentParser(prog="swiss2tab",usage="swiss2tab.py -i <input UNIPROT> -o <output-file>",epilog="Example: python2.7 swiss2tab.py -i uniprot_sprot.dat -o uniprot_sprot.tab\n\nWritten by Kosai+Asli, OCT 2013. Last modified MAY 2014.",description="Description: Extracts AC,ID,DE,GN,Taxonomy,AC(cession),Organism,ncbi_taxID,GO-term,KEGG-id from STOCKHOLM-formatted file and converts it to tabular-format")
parser.add_argument("-i",metavar="database", help="STOCKHOLM-formatted database",nargs=1,required=True)
parser.add_argument("-o",metavar="OUTPUT NAME",help="output-name, put the whole output name, fx '-o uniprot.dat.tab'",nargs=1,required=True)
# parser.add_argument("-q","--quiet",help="Quiet-mode, suppresses all stdout output. Write \"-q\" with no arguments in commandline. Default is off.",action="store_true")
parser.add_argument("-v",help="Verbose. Prints out progress and details to stdout output. Write \"-v\" with no arguments in commandline. Default is off.",action="store_true")
# return parser.parse_args(), parser
self.parser = parser
def makeTAB(self):
fid = self.gzipopen(self.args.i[0]) #input_database
fout = open(self.args.o[0],"w") #output_tab-file-name
dbfile = os.popen("grep \"ID \" "+self.args.i[0] + " | wc -l")
ctot = dbfile.read()
dbfile.close()
ctot = int(ctot.split(" ")[0])
rangelist = range(0,ctot,10000)
timeEST = ctot*17/536489
self.printer("Estimated time usage: "+str(round(timeEST,1))+" minutes ("+str(round(timeEST/60,1))+" hours)\n")
input_seq_iterator = SeqIO.parse(fid, "swiss")
fout.write("AC(name)\tID\tDE\tGN\tTaxonomy\tAccession\tOrganism\tncbi_taxID\tGO_term\tKEGG_id\n")
rowstring = ""
c = 0
for record in input_seq_iterator:
if record.name:
rowstring += record.name+"\t"
else:
rowstring += "N/A\t"
if record.id:
rowstring += record.id+"\t"
else:
rowstring += "N/A\t"
if record.description:
rowstring += record.description+"\t"
else:
rowstring += "N/A\t"
if record.annotations:
if 'gene_name' in record.annotations:
rowstring += str(record.annotations['gene_name'])+"\t"
else:
rowstring += "N/A\t"
if "taxonomy" in record.annotations:
rowstring += str(record.annotations["taxonomy"])+"\t"
else:
rowstring += "N/A\t"
if "accessions" in record.annotations:
rowstring += str(record.annotations['accessions'])+"\t"
else:
rowstring += "N/A\t"
if "organism" in record.annotations:
rowstring += str(record.annotations['organism'])+"\t"
else:
rowstring += "N/A\t"
if "ncbi_taxid" in record.annotations:
rowstring += str(record.annotations['ncbi_taxid'])+"\t"
else:
rowstring += "N/A\t"
KEGG = []
GO = []
if record.dbxrefs:
for el in record.dbxrefs:
if el[0:3] == "GO:":
# rowstring += el[3:]+";"
GO.append(el[3:])
if el[0:5] == "KEGG:":
KEGG.append(el[5:])
if not KEGG:
# rowstring += "N/A"
KEGG.append("N/A")
if not GO:
GO.append("N/A")
go = ";".join(GO)
kegg = ";".join(KEGG)
rowstring += go + "\t" + kegg
fout.write(rowstring+"\n")
rowstring = ""
c += 1
if c in rangelist or c==1:
self.printer("FINISHED "+str(c)+" ENTRIES out of "+str(ctot)+"\n")
sys.stdout.flush()
self.printer("FINISHED "+str(c)+" ENTRIES out of "+str(ctot)+"\n")
fid.close()
fout.close()
self.indextab()
    def printer(self,string): # suppresses stdout output unless verbose (-v) is on
# if not self.args.quiet:
if self.args.v:
print string,
def indextab(self):
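        # Writes "<output>.indexed": one line per entry of the tab file in the
        # form "AC<TAB>start,stop", where start and stop are byte offsets into
        # the tab file, allowing later random access to single records.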
fid = open(self.args.o[0],"r")
fout = open(self.args.o[0]+".indexed","w")
line = fid.readline()
while 1:
start = fid.tell()
line = fid.readline()
if not line or not len(line):
# stop = fid.tell()
# header = line.split("\t")[0]
# fout.write(header + "\t" + str(start) + "," + str(stop)+"\n")
break
stop = fid.tell()
header = line.split("\t")[0]
fout.write(header + "\t" + str(start) + "," + str(stop)+"\n")
fout.close()
fid.close()
def gzipopen(self,fileID):
if fileID[-3:] == ".gz":
return gzip.open(fileID)
else:
return open(fileID,"rU")
def mainthing(self):
# self.printer("Cluster2Fasta initialized at"+str(self.timestarted)+"\n")
self.makeTAB()
timeused = (time.time() - self.start) / 60
self.printer("### Time used: "+str(round(timeused)) + " min ("+str(round(timeused/60,1))+" hours)\n")
if __name__ == "__main__":
try:
myclass = main()
myclass.args = myclass.parser.parse_args(sys.argv[1:])
myclass.printer("\n### "+sys.argv[0]+" initialized at "+ myclass.timestarted + "\n")
myclass.printer("### OPTIONS: "+str(myclass.args)+"\n")
myclass.mainthing()
except IOError as i:
print "I/O error({0}): {1}".format(i.errno, i.strerror)
except Exception,e:
print str(e)
import traceback
traceback.print_exc()
# myclass = main()
# myclass.args = myclass.parser.parse_args(sys.argv[1:])
# myclass.mainthing()
'''
handle=open(swissfilename, "rU")
input_seq_iterator = SeqIO.parse(handle, "swiss")
for record in input_seq_iterator:
print record.id, record.name, record.description,record.annotations["taxonomy"],record.annotations['accessions'], record.annotations['ncbi_taxid'], record.annotations['organism'], record.annotations['gene_name']
handle.close()
'''
######################
'''
INPUT:
Extracts AC,ID,DE,GN,Taxonomy,AC(cession),Organism,ncbi_taxID,GO_term,KEGG-id
from STOCKHOLM-formatted file and converts it to tabular-format
OUTPUT:
Tabular form of a stockholm-formatted file, where each line is
an entry.
OPTIONS LIST:
-i database: STOCKHOLM-formatted database
-o OUTPUT NAME: output-name, tab-formatted
-q quiet: Quiet-mode, suppresses all stdout output. Write "-q" with no arguments in commandline. Default is off.
'''
| gpl-3.0 | 3,271,182,841,077,309,400 | 36.515464 | 423 | 0.554411 | false |
arkem/pyflag | src/pyflag/ColumnTypes.py | 1 | 53873 | #!/usr/bin/env python
# ******************************************************
# Copyright 2004: Commonwealth of Australia.
#
# Developed by the Computer Network Vulnerability Team,
# Information Security Group.
# Department of Defence.
#
# Michael Cohen <[email protected]>
#
# ******************************************************
# Version: FLAG $Version: 0.87-pre1 Date: Thu Jun 12 00:48:38 EST 2008$
# ******************************************************
#
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# ******************************************************
""" This module implements the base classes for column types. These
are used by the table widget for implementing special handling for
data types, operators etc.
"""
from pyflag.TableObj import TableObj
import pyflag.FlagFramework as FlagFramework
from pyflag.FlagFramework import Curry, query_type
import pyflag.conf
config=pyflag.conf.ConfObject()
import pyflag.DB as DB
import pyflag.TypeCheck as TypeCheck
import pyflag.FileSystem as FileSystem
import socket,re
import pyflag.Time as Time
import time, textwrap
import pyflag.Registry as Registry
import re,struct, textwrap
import pyflag.TableActions as TableActions
class date_obj:
format = "%Y-%m-%d %H:%M:%S"
def __init__(self, date):
self.date = date
def __str__(self):
try:
return time.strftime(self.format,self.date)
except TypeError:
return self.date.strftime(self.format)
def __eq__(self, x):
return x == self.date
def __le__(self, x):
return x < self.date
def __gt__(self, x):
return x > self.date
try:
## This is for parsing ambigous dates:
import dateutil.parser
def guess_date(arg):
try:
return date_obj(dateutil.parser.parse(arg))
except ValueError:
## Try a fuzzy match
return date_obj(dateutil.parser.parse(arg, fuzzy=True))
except ImportError:
import time
FORMATS = [ "%Y%m%d %H:%M:%S",
"%Y%m%d %H:%M",
"%Y%m%d",
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%d %H:%M",
"%Y-%m-%d",
"%d/%m/%Y",
"%d/%m/%Y %H:%M:%S",
"%d/%m/%Y %H:%M",
"%d/%m/%Y %H:%M:%S",
"%Y/%m/%d:%H:%M:%S",
"%a, %d %b %Y %H:%M:%S %Z",
]
def guess_date(string):
for i in range(len(FORMATS)):
try:
f = FORMATS[i]
result = time.strptime(string, f)
## Move the format to the start we are likely to use it again
if i>0:
FORMATS.pop(i)
FORMATS.insert(0,f)
return date_obj(result)
except ValueError:
pass
raise ValueError("Unable to parse date %s" % string)
class LogParser:
defaults = [ ['name', "Name", ""],
['column', "DB Column", ""],
]
def render_form(self, basename, result):
""" A hook called from the Advanced Log builder which allows us to build this column using the GUI. Note that LogCompatible must be True for this to work. """
for name, description, default in self.defaults:
fieldname = "%s_%s" % (basename,name)
result.defaults[fieldname] = default
result.textfield(description, fieldname)
def parse_form(self, basename, query):
""" Returns an argv which can be used to instantiate the
column type based on query
"""
result = {}
for name, description, default in self.defaults:
result[name] = query.get(basename+name, default)
return result
class LogParserMixin:
""" This is a mixin class which should be used to designate a
class as suitable for log analysis
"""
LogCompatible = True
class LogParser(LogParser):
pass
## The following are common column types which the parser can
## handle. ColumnTypes can be defined as plugins by extending the
## ColumnTypes base class.
class ColumnType:
""" Base class for column type searches.
Tables are just collections of column types. These objects are
responsible for displaying the values from the column and are used
to generate SQL.
"""
    ## This controls whether the user is able to select it as a columntype
## when importing a log file.
hidden = False
ignore = False
mode = 'Full'
## This is a list of the tests that should be run. In this format:
    ## operator, filter value, is an exception expected?
tests = [ ["=", "0", False],
[">", "0", False] ]
def __init__(self, name=None,
column=None, link='',
callback=None, link_pane='self',
regex = r"[^\s]+",
boundary = r'\s+', case=None, default=None,
wrap=True, table=None, **kwargs
):
if not name or not column:
raise RuntimeError("%s: You must set both name (%s) and column (%s)" % (
self,name, column))
self.name = name
self.extended_names = [ name ]
self.column = column
self.link = link
self.callback = callback
self.link_pane = link_pane
self.regex = re.compile(regex)
self.regex_str = regex
self.boundary = re.compile(boundary)
self.wrap = wrap
self.table = table
self.case = case
self.default = default
for k,v in kwargs.items():
setattr(self, k, v)
## These are the symbols which will be treated literally
symbols = {
}
def __repr__(self):
return "<ColumnType %s, name %s>" % (self.__class__.__name__, self.name)
def make_index(self, dbh, table):
""" Creates an index on table using dbh """
dbh.check_index(table, self.column)
def where(self):
pass
def operators(self, context = 'sql'):
""" Returns a list of operators we support """
ops = self.symbols.copy()
if context == 'sql':
prefix = 'operator_'
else:
prefix = "code_"
for m in dir(self):
if m.startswith(prefix):
ops[m[len(prefix):]]=m
return ops
    ## In the 'sql' context we expect to get back a string containing
    ## the SQL to be sent to the server. In the 'code' context, we
    ## expect to get back a function with prototype x(row), which
    ## evaluates the expression given a dict of all the columns in
    ## the row.
def parse(self, column, operator, arg, context='sql', ui=None, elements=None):
""" Parse the current expression using the operators available
in the column type. ui is a ui which may be used by us to
render any specialised errors (if we raise it the GUI will
render it for us). elements is the full list of all the other
elements involved in the parsing."""
## Try to find the method which handles this operator. We look
## first in symbols and then in a method containing the name
## requested:
self.ui = ui
self.elements = elements
if context == 'sql':
prefix = "operator_"
else:
prefix = 'code_'
if operator in self.symbols:
## This has to succeed or there is a programming error.
method = getattr(self, prefix + self.symbols[operator])
else:
try:
method = getattr(self, prefix + operator)
except Exception,e:
print e
raise RuntimeError("%s is of type %s and has no operator %r.\nDoes it make sense to use this operator on this data?" % (column, ("%s"% self.__class__).split('.')[-1], operator))
return method(column, operator, arg)
def mode_Full(self):
return "%s" % (self.table, )
def mode_Recent(self):
if self.table == 'inode':
return self.mode_Full()
## Check that the table is up to date
dbh = DB.DBO(self.case)
dbh.execute("select max(inode_id) as max from `%s`", self.table)
max_inode_id = dbh.fetch()['max']
dbh.execute("select max(inode_id) as max from `%s_Recent`", self.table)
max_current_inode_id = dbh.fetch()['max']
if max_current_inode_id < max_inode_id:
dbh2 = dbh.clone()
dbh.execute("select * from `%s` order by inode_id desc limit 10", self.table)
for row in dbh:
for k,v in row.items():
if not v or v=='None':
del row[k]
dbh2.insert("%s_Recent" % self.table,
_fast = True,
**row)
dbh.invalidate("%s_Recent" % self.table)
return "%s_Recent" % (self.table, )
def join_table(self):
table = getattr(self, "mode_%s" % self.mode)()
return table
def escape_column_name(self, column_name):
if self.table == None:
raise RuntimeError("Table can not be None")
## The table we actually use depends on the current mode:
table = getattr(self, "mode_%s" % self.mode)()
return "`%s`.`%s`" % (table, column_name)
def code_literal(self, column, operator, arg):
## Bit of a hack really:
return lambda row: eval(DB.expand("%r %s %r", (row[self.column], operator, arg.__str__())), {})
def operator_literal(self, column,operator, arg):
column = self.escape_column_name(self.column)
return DB.expand("%s %s %r" ,(column, operator, arg))
def code_equal(self, column, operator, arg):
## Make sure our arg is actually an integer:
return lambda row: row[self.column] == arg
def operator_equal(self, column, operator, address):
return self.operator_literal(column, '=', address)
def link_display_hook(self, value, row, result):
if self.link and not self.callback:
q = self.link.clone()
q.FillQueryTarget(value)
tmp = result.__str__()
result.clear()
result.link(tmp, q, pane=self.link_pane)
def plain_display_hook(self, value, row, result):
if value:
## Remove non printable chars:
value = ''.join([ x for x in value if ord(x)>31 ])
result.text(value, wrap="full")
display_hooks = [ plain_display_hook, link_display_hook, ]
display_hooks_names = [ "plain_display_hook", "link_display_hook", ]
def display(self, value, row, result):
""" This method is called by the table widget to allow us to
translate the output from the database to the screen. Note
that we have access to the entire row (i.e. all the values in
the query if we need it).
"""
## By default just implement a simple callback:
if self.callback:
return self.callback(value)
elif self.wrap:
value = textwrap.fill( "%s" % value)
## Allow all our display hooks to do their things
for hook in self.display_hooks:
hook(self, value, row, result)
def csv(self, value):
""" This outputs data for csv output"""
## We seem to need to escape this for some stupid spreadsheets
try:
value.replace("\n","\\n")
value.replace("\r","\\r")
except AttributeError:
# Probably not a string...
pass
        ## If we have a callback we can't render anything:
if self.callback:
return "-"
else: return value
def extended_csv(self, value):
return {self.name:self.csv(value)}
def render_html(self, value, table_renderer):
""" This is used by the HTML renderer to render the column
into HTML
"""
if value:
import pyflag.HTMLUI as HTMLUI
result = HTMLUI.HTMLUI(initial = True)
result.text(FlagFramework.smart_unicode(value), wrap='full')
value = result.__str__()
return value
def export(self, value, exportdir):
""" The export method allows a ColumnType to perform some action when
a user exports a table. e.g. Copy data to an export directory
"""
#print "EXPORTING: %s to %s" % (value, exportdir)
pass
def create(self):
""" This needs to generate a create clause for creating this
table. It is used when we wish to make a table with this
column type.
"""
def insert(self, value):
""" This function returns the sql required to set the name of
the column to value.
@returns: (column name, value)
        Note that column name must be preceded with _ if value needs to be taken literally (not escaped).
WARNING: It is up to the column type to enforce adequate
escaping if _ is used. This may be a potential vulnerability
when loading untrusted log files.
If None is returned, the value is not inserted into this
column position, and the columns default value will be used.
"""
return self.column, value
def select(self):
""" Returns the SQL required for selecting from the table. """
return self.escape_column_name(self.column)
def order_by(self):
""" This is called to get the order by clause """
return self.escape_column_name(self.column)
def column_decorator(self, table, sql, query, result):
""" Every column type is given the opportunity to decorate its
table heading
"""
return self.name
## This allows the column to be used by the log builder.
def log_parse(self, row):
""" This is called by the log processing to parse the value of
this column from the row.
        We start parsing at the start of the row. FIXME: It might be
        faster to be passed the offset at which to start parsing, so we
        don't need to keep slicing strings.
We need to return the tuple:
consumed, name, sql
Where consumed is the number of bytes consumed from the row.
name is the name of the column to insert as, sql is the SQL to
use for insertion - note that if name starts with _ we take
sql as raw otherwise we escape it.
"""
## Try to consume a boundary:
b = self.boundary.match(row)
if b:
row = row[b.end():]
offset = b.end()
else:
offset = 0
capture = self.regex.match(row)
if not capture: raise RuntimeError("Unable to match %s on row %r " %
(self.regex_str, row))
return (capture.end()+offset, self.column, capture.group(0))
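# Sketch (not part of the original pyflag source): the two parse() contexts
# described above, plus log_parse(), using the IntegerType defined further
# below. The table name 'dummy' is an assumption for illustration.
def _example_column_type_usage():
    col = IntegerType('Size', 'size', table='dummy')
    ## SQL context: a string fragment destined for the WHERE clause.
    sql_fragment = col.parse('Size', '=', '10', context='sql')
    ## Code context: a callable evaluated against a row dictionary.
    row_test = col.parse('Size', '=', 10, context='code')
    ## Log loading: consume one whitespace-delimited field from a raw row.
    consumed, column, value = col.log_parse("10 200 /index.html")
    return sql_fragment, row_test({'size': 10}), (consumed, column, value)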
def add_display_hook(cls, name, cb, position=-1):
if name not in cls.display_hooks_names:
cls.display_hooks_names.append(name)
cls.display_hooks.insert(position,cb)
def clear_display_hook(cls):
cls.display_hooks = []
cls.display_hooks_names = []
### Some common basic ColumnTypes:
class StateType(ColumnType):
""" This column can hold one of several different states. """
## This is a list of states that we can take on. Keys are args,
## values are sql types.
hidden = True
states = {}
symbols = {
'=': 'equal'
}
def __init__(self, *args, **kwargs):
ColumnType.__init__(self, *args, **kwargs)
self.docs = {'is': """ Matches when the column is of the specified state. Supported states are %s""" % self.states.keys()}
self.tests = [ [ "is" ,"foobar", True ],
[ "is" , self.states.keys()[0], False],
]
self.states_rev = {}
for k,v in self.states.items():
self.states_rev[v]=k
def code_is(self, column, operator, state):
for k,v in self.states.items():
if state.lower()==k.lower():
return lambda row: row[self.column] == v
raise RuntimeError("Dont understand state %r. Valid states are %s" % (state,self.states.keys()))
def operator_is(self, column, operator, state):
for k,v in self.states.items():
if state.lower()==k.lower():
return DB.expand("%s = %r" ,(self.escape_column_name(self.column), v))
raise RuntimeError("Dont understand state %r. Valid states are %s" % (state,self.states.keys()))
def create(self):
return DB.expand("`%s` enum(%s) default NULL" ,
(self.column, ','.join([ DB.expand("%r",x) for x in self.states.values()])))
def plain_display_hook(self, value, row, result):
try:
result.text(self.states_rev[value])
except KeyError:
result.text(value)
display_hooks = [ plain_display_hook, ColumnType.link_display_hook]
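# Sketch (not part of the original source): a minimal StateType subclass.
# _ExampleOnOffType('Status', 'status', table='dummy').operator_is(
#     'Status', 'is', 'on') would produce "`dummy`.`status` = 'enabled'".
class _ExampleOnOffType(StateType):
    states = {'on': 'enabled', 'off': 'disabled'}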
class SetType(ColumnType):
""" This can hold a number of different items simultaneously """
hidden = True
tests = []
states = []
symbols = {
}
def __init__(self, *args, **kwargs):
ColumnType.__init__(self, *args, **kwargs)
self.states = kwargs['states']
self.docs = {'contains': """ Matches when the column is of the specified state. Supported states are %s""" % self.states}
def create(self):
return DB.expand("`%s` set('',%s)" ,
(self.column, ','.join([DB.expand("%r",x) for x in self.states])))
class IntegerType(ColumnType, LogParserMixin):
symbols = {
"=":"equal",
"!=":"literal",
"<=": "literal",
">=": "literal",
"<": "literal",
">": "literal",
}
def code_equal(self, column, operator, arg):
## Make sure our arg is actually an integer:
integer = int(arg)
return lambda row: int(row[self.column]) == integer
auto_increment = False
def create(self):
if self.default!=None:
return "`%s` int(11) not null default %s" % (self.column, self.default)
elif self.auto_increment:
return "`%s` int(11) not null auto_increment" % self.column
else:
return "`%s` int(11)" % self.column
class BigIntegerType(IntegerType):
def create(self):
return "`%s` BIGINT default 0" % self.column
class ShortIntegerType(IntegerType):
def create(self):
return "`%s` MEDIUMINT unsigned default 0" % self.column
class EditableStringType(ColumnType):
hidden = True
def edit_display_hook(self, value, row, result):
""" This method is called by the table widget to allow us to
translate the output from the database to the screen. Note
that we have access to the entire row (i.e. all the values in
the query if we need it).
"""
def edit_cb(query, result):
timeline = TimelineObj(case=query['case'])
if 'Update' in query.getarray('__submit__'):
query['id']=row['id']
new_id=timeline.edit(query,result)
return result.refresh(0, query, pane='parent')
## Present the user with the form:
result.start_form(query, pane='self')
result.heading("Edit Event")
## Then show the form
query['id']=row['id']
timeline.edit_form(query,result)
result.end_form(value='Update')
def delete_row_cb(query, result):
dbh = DB.DBO(query['case'])
dbh.delete('timeline', "id=%i" % row['id'])
result.refresh(0, query, pane='parent')
tmp1 = result.__class__(result)
tmp2 = result.__class__(result)
tmp3 = result.__class__(result)
tmp1.popup(edit_cb, "Edit this string", icon="balloon.png")
tmp2.popup(delete_row_cb, "Delete this row from the database", icon="delete.png")
tmp3.text(value, font='typewriter')
result.row(tmp1, tmp2, tmp3)
display_hooks = [edit_display_hook,]
class StringType(ColumnType,LogParserMixin):
symbols = {
"=":"equal",
"!=":"literal",
}
tests = [ ["=", "String", False],
["!=", "String", False],
["contains", "String", False],
["matches", "string", False],
["regex", "[0-9]+", False],
]
def make_index(self, dbh, table):
""" Creates an index on table using dbh """
dbh.check_index(table, self.column, length=50)
def __init__(self, *args, **kwargs):
self.text = kwargs.get('text',False)
self.width = kwargs.get('width',2000)
ColumnType.__init__(self, *args, **kwargs)
def create(self):
if self.text:
return "`%s` TEXT default NULL" % (self.column)
else:
return "`%s` VARCHAR(%s) default NULL" % (self.column, self.width)
def code_contains(self, column, operator, arg):
def x(row):
return arg in row[self.column]
return x
def operator_contains(self, column, operator, arg):
""" Matches when the column contains the pattern anywhere. Its the same as placing wildcards before and after the pattern. """
return self.operator_literal(column , 'like' , "%%" + arg + "%%")
def code_matches(self, column, operator, arg):
regex = arg.replace("%",".*")
return lambda row: re.match(regex, row[self.column])
def operator_matches(self, column, operator, arg):
""" This matches the pattern to the column. Wild cards (%) can be placed anywhere, but if you place it in front of the pattern it could be slower. """
return self.operator_literal(column , 'like' , arg)
def code_regex(self, column, operator, arg):
return lambda row: re.match(arg, row[self.column])
def operator_regex(self,column,operator,arg):
""" This applies the regular expression to the column (Can be slow for large tables) """
return self.operator_literal(column, 'rlike', arg)
class LogParser(LogParser):
defaults = LogParser.defaults[:]
defaults.append(['regex','RegEx', r"[^\s]+"])
defaults.append(['boundary', 'Boundary', r"\s+"])
class BlobType(StringType):
def create(self):
return "`%s` BLOB default NULL" % (self.column)
class LongStringType(StringType):
def create(self):
return "`%s` TEXT default NULL" % (self.column)
class TimestampType(IntegerType):
"""
This is a timestamp parser.
===========================
We can accept a format string to use to parse the timestamp from the log file.
The following directives can be embedded in the FORMAT string.
They are shown without the optional field width and precision
specification, and are replaced by the indicated characters in the
result:
    ========= ========================================================
    Directive Meaning
    --------- --------------------------------------------------------
    %a        Locale's abbreviated weekday name.
    %A        Locale's full weekday name.
    %b        Locale's abbreviated month name.
    %B        Locale's full month name.
    %c        Locale's appropriate date and time representation.
    %d        Day of the month as a decimal number [01,31].
    %H        Hour (24-hour clock) as a decimal number [00,23].
    %I        Hour (12-hour clock) as a decimal number [01,12].
    %j        Day of the year as a decimal number [001,366].
    %m        Month as a decimal number [01,12].
    %M        Minute as a decimal number [00,59].
    %p        Locale's equivalent of either AM or PM.
    %S        Second as a decimal number [00,61].
    %U        Week number of the year (Sunday as the first day of the
              week) as a decimal number [00,53]. All days in a new
              year preceding the first Sunday are considered to be in
              week 0.
    %w        Weekday as a decimal number [0(Sunday),6].
    %W        Week number of the year (Monday as the first day of the
              week) as a decimal number [00,53]. All days in a new
              year preceding the first Monday are considered to be in
              week 0.
    %x        Locale's appropriate date representation.
    %X        Locale's appropriate time representation.
    %y        Year without century as a decimal number [00,99].
    %Y        Year with century as a decimal number.
    %Z        Time zone name (no characters if no time zone exists).
    %%        A literal % character.
    ========= ========================================================
"""
tests = IntegerType.tests + [ ["after", "'0943234'", True],
["after" ,"2007-10-11", False],
["before", "23:22", False]
]
def __init__(self, name='', column='', format="%d/%b/%Y %H:%M:%S",
override_year = 0, **kwargs):
IntegerType.__init__(self,name=name,column=column, **kwargs)
self.format = format
self.override_year = int(override_year)
def create(self):
return "`%s` TIMESTAMP NULL DEFAULT '0000-00-00 00:00:00'" % self.column
def code_after(self, column, operator, arg):
""" Matches if the time in the column is later than the time
specified. We try to parse the time formats flexibly if
possible.
"""
date_arg = guess_date(arg)
return lambda row: guess_date(row[self.column]) > date_arg
def operator_after(self, column, operator, arg):
""" Matches times after the specified time. The time arguement must be given in the format 'YYYY-MM-DD HH:MM:SS' (i.e. Year, Month, Day, Hour, Minute, Second). """
date_arg = guess_date(arg)
return "%s > '%s'" % (self.escape_column_name(self.column), date_arg)
def code_before(self,column, operator, arg):
date_arg = guess_date(arg)
return lambda row: guess_date(row[self.column]) <= date_arg
def operator_before(self,column, operator, arg):
""" Matches times before the specified time. The time arguement must be as described for 'after'."""
date_arg = guess_date(arg)
return "%s < '%s'" % (self.escape_column_name(self.column), date_arg)
def log_parse(self, row):
t,m = Time.strptime(row, format = self.format)
if self.override_year:
t = list(t)
t[0] = self.override_year
date = time.strftime("%Y-%m-%d %H:%M:%S", t)
return m.end(), self.column, date
class LogParser(LogParser):
defaults = LogParser.defaults[:]
defaults.append(['format', 'Format String', "%d/%b/%Y %H:%M:%S"])
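# Sketch (not part of the original source): a TimestampType configured for an
# Apache-style date such as "29/Sep/2015 16:17:01"; create() shows the column
# definition emitted when the table is built.
def _example_timestamp_column():
    ts = TimestampType(name='Time', column='time', format="%d/%b/%Y %H:%M:%S")
    return ts.create()  # "`time` TIMESTAMP NULL DEFAULT '0000-00-00 00:00:00'"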
class PCAPTime(TimestampType):
symbols = {'=':'equal'}
LogCompatible = False
def select(self):
return "(select ts_sec from pcap where id=%s limit 1)" % self.escape_column_name(
self.column)
def order_by(self):
return self.column
def operator_after(self, column, operator, arg):
date_arg = guess_date(arg)
dbh = DB.DBO(self.case)
dbh.execute("select id from pcap where ts_sec > '%s' order by id limit 1" % (date_arg))
id = dbh.fetch()['id']
return "%s > '%s'" % (self.escape_column_name(self.column), id)
def operator_before(self, column, operator, arg):
date_arg = guess_date(arg)
dbh = DB.DBO(self.case)
dbh.execute("select id from pcap where ts_sec < '%s' order by id desc limit 1" % (date_arg))
id = dbh.fetch()['id']
return "%s < '%s'" % (self.escape_column_name(self.column), id)
# FIXME: I'm not sure this is the correct thing to do here
# may not scale well, seems to work though
def operator_equal(self, column, operator, arg):
date_arg = guess_date(arg)
dbh = DB.DBO(self.case)
dbh.execute("select id from pcap where ts_sec = '%s'" % (date_arg))
ids = [ str(row['id']) for row in dbh ]
return "%s in (%s)" % (self.escape_column_name(self.column), ",".join(ids))
class IPType(ColumnType, LogParserMixin):
""" Handles creating appropriate IP address ranges from a CIDR specification. """
    ## Code and ideas were borrowed from Christos TZOTZIOY Georgiou's ipv4.py:
## http://users.forthnet.gr/ath/chrisgeorgiou/python/
def __init__(self, name='', column='', **kwargs):
ColumnType.__init__(self, name=name, column=column, **kwargs)
self.extended_names = [name, name + "_geoip_city", name + "_geoip_country", name + "_geoip_org", name + "_geoip_isp", name + "_geoip_lat", name + "_geoip_long"]
# reMatchString: a re that matches string CIDR's
reMatchString = re.compile(
r'(\d+)' # first byte must always be given
r'(?:' # start optional parts
r'\.(\d+)' # second byte
r'(?:'# optionally third byte
r'\.(\d+)'
r'(?:' # optionally fourth byte
r'\.(\d+)'
r')?'
r')?' # fourth byte is optional
r')?' # third byte is optional too
r'(?:/(\d+))?$') # and bits possibly
# masks: a list of the masks indexed on the /network-number
masks = [0] + [int(-(2**(31-x))) for x in range(32)]
symbols = {
'=': 'equal',
'<': 'literal',
'>': 'literal',
'<=': 'literal',
'>=': 'literal',
'!=': 'literal',
}
tests = [ [ "=", "foo", True],
## Cant equate with a range
[ "=", '10.10.10.1', False],
[ "=", "10.10.10.1/24", True],
[ "netmask", "10.10.10.1/24", False],
# Should this be valid or not?
#[ "netmask", "0", True],
]
def code_equal(self, column, operator, address):
return lambda row: row[self.column] == address
def operator_equal(self, column, operator, address):
numeric_address, broadcast = self.parse_netmask(address)
if numeric_address != broadcast:
raise RuntimeError("You specified a netmask range for an = comparison. You should probably use the netmask operator instead")
return "%s = '%s'" % (self.escape_column_name(self.column), numeric_address)
def operator_literal(self, column, operator, address):
return DB.expand("%s %s INET_ATON(%r)" ,
(self.escape_column_name(self.column), operator, address))
def code_matches(self, column, operator, address):
""" Matches the IP address specified exactly """
return self.code_netmask(column, operator, address)
def code_netmask(self, column, operator, address):
""" Matches IP addresses that fall within the specified netmask. Netmask must be provided in CIDR notation or as an IP address (e.g. 192.168.1.1/24)."""
numeric_address, broadcast = self.parse_netmask(address)
def f(row):
ip = FlagFramework.inet_aton(row[column])
return ip > numeric_address and ip < broadcast
return f
def operator_matches(self, column, operator, address):
""" Matches the IP address specified exactly """
return self.operator_netmask(column, operator,address)
def parse_netmask(self, address):
# Parse arg as a netmask:
match = self.reMatchString.match(address)
try:
numbers = [x and int(x) or 0 for x in match.groups()]
# by packing we throw errors if any byte > 255
packed_address = struct.pack('4B', *numbers[:4]) # first 4 are in network order
numeric_address = struct.unpack('!I', packed_address)[0]
bits = numbers[4] or numbers[3] and 32 or numbers[2] and 24 or numbers[1] and 16 or 8
mask = self.masks[bits]
broadcast = (numeric_address & mask)|(~mask)
return numeric_address, broadcast
except Exception,e:
raise ValueError("%s does not look like a CIDR netmask (e.g. 10.10.10.0/24)" % address)
def operator_netmask(self, column, operator, address):
""" Matches IP addresses that fall within the specified netmask. Netmask must be provided in CIDR notation or as an IP address (e.g. 192.168.1.1/24)."""
numeric_address, broadcast = self.parse_netmask(address)
return " ( %s >= %s and %s <= %s ) " % (self.escape_column_name(self.column),
numeric_address,
self.escape_column_name(self.column),
broadcast)
def create(self):
## IP addresses are stored as 32 bit integers
return "`%s` int(11) unsigned default 0" % self.column
def select(self):
## Upon selection they will be converted to strings:
return "inet_ntoa(`%s`)" % (self.column)
def insert(self,value):
return "_"+self.column, DB.expand("inet_aton(%r)", value.strip())
display_hooks = IntegerType.display_hooks[:]
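# Sketch (not part of the original source): how IPType expands a CIDR filter
# into the inclusive numeric range used by operator_netmask.
def _example_iptype_netmask():
    ip = IPType('Source IP', 'source_ip')
    return ip.parse_netmask("10.10.10.0/24")   # (168430080, 168430335)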
class InodeType(StringType):
""" A unified view of inodes """
hidden = True
LogCompatible = False
def __init__(self, name='Inode', column='inode', link=None, case=None, callback=None):
#raise RuntimeError("InodeType is depracated - you must use InodeIDType now")
self.case = case
StringType.__init__(self,name,column,link,callback=callback)
def get_inode(self, inode):
return inode
class InodeIDType(IntegerType):
LogCompatible = False
tests = [ [ "contains", "|G", False ],
[ "=", "Itest", False ],
]
def __init__(self, name='Inode', column='inode_id', **kwargs):
ColumnType.__init__(self, name=name, column=column, **kwargs)
self.table = 'inode'
def operator_contains(self, column, operator, pattern):
column = self.escape_column_name(self.column)
return "inode.inode like '%%%s%%'" % pattern
def export(self, value, exportdir):
""" Copy Inode data to the exportdir """
print "Exporting Inode %s to %s" % (value, exportdir)
fsfd = FileSystem.DBFS(self.case)
infd = fsfd.open(inode_id=value)
outfd = open("%s/%s" % (exportdir, value), "wb")
try:
while True:
data = infd.read(4096)
if not data: break
outfd.write(data)
except IOError, e:
print "Got Error exporting inode_id %s: %s" % (value, e)
outfd.close()
def html(self, value):
return '<a href="%s">%s</a>' % (value, value)
def csv(self, value):
fsfd = FileSystem.DBFS(self.case)
path, inode, inode_id = fsfd.lookup(inode_id=value)
return inode
def column_decorator(self, table, sql, query, result):
case = query['case']
report = Registry.REPORTS.dispatch(family = 'Disk Forensics',
report = "ViewFile")
report = report(None, result)
def browse_cb(query, result):
try:
limit = int(query.get('inode_limit',0))
except: limit = 0
dbh = DB.DBO(case)
dbh.cached_execute(sql, limit=limit, length=2)
row = dbh.fetch()
if not row:
result.heading("No inodes matching")
return
next_row = dbh.fetch()
fsfd = FileSystem.DBFS(self.case)
inode_id = row[self.name]
dbh.execute("select * from annotate where inode_id = %r limit 1",
inode_id)
row2 = dbh.fetch()
query.set('inode_id', inode_id)
query.default("mode", "Summary")
report.display(query, result)
## What should we do now - we basically set the type of
## toolbar to show
action = "activate"
if query.has_key('annotate'):
dbh = DB.DBO(self.case)
## We always do a delete in case there was a row there
dbh.delete('annotate',
where = 'inode_id = %s' % inode_id,)
## Then we do an insert to set the new value
if query['annotate'] == 'yes':
category = query.get("new_annotate_category")
if not category:
category = query.get("annotate_category","Note")
query.set("annotate_category",category)
query.clear("new_annotate_category")
dbh.insert('annotate',
inode_id = inode_id,
note = query.get("annotate_text","Tag"),
category = category,
)
action = 'deactivate'
else:
action = 'activate'
elif row2:
action = 'deactivate'
## Now we show the appropriate toolbar
if action=='activate':
query.set('annotate','yes')
result.toolbar(icon='yes.png', link = query, pane = 'pane')
else:
query.set('annotate','no')
result.toolbar(icon='no.png', link = query, pane = 'pane',
tooltip=row2 and row2['note'])
query.clear('annotate')
new_query = query.clone()
del new_query['inode']
if limit==0:
result.toolbar(icon = 'stock_left_gray.png')
else:
new_query.set('inode_limit', limit-1)
result.toolbar(icon = 'stock_left.png', link=new_query,
pane='self', tooltip = "Inode %s" % (limit -1))
if not next_row:
result.toolbar(icon = 'stock_right_gray.png')
else:
new_query.set('inode_limit', limit + 1)
result.toolbar(icon = 'stock_right.png', link=new_query,
pane='self',tooltip = "Inode %s" % (limit + 1))
def set_annotation_text(query,result):
query.default('annotate_text','Tag')
query.default("annotate_category", "Note")
result.decoration='naked'
result.heading("Set Annotation Text")
result.para("This text will be used for all quick annotation")
result.start_form(query, pane='parent_pane')
result.textarea("Annotation Text",'annotate_text')
TableActions.selector_display(None, "Category", "annotate_category",
result=result, table = 'annotate',
field='category', case=query['case'],
default='Note')
result.end_table()
result.end_form()
def goto_cb(query,result):
query.default('goto','0')
result.decoration='naked'
result.heading("Goto row number")
result.start_form(query, pane='parent_pane')
result.textfield("Row number",'inode_limit')
result.end_table()
result.end_form()
result.toolbar(
cb = set_annotation_text,
text = "Set Annotation Text",
icon = 'annotate.png', pane='popup',
)
result.toolbar(
cb = goto_cb,
text = "Goto row number (Current %s)" % query.get('inode_limit',1),
icon = 'stock_next-page.png',
pane = 'popup',)
result.toolbar(cb = browse_cb, icon="browse.png",
tooltip = "Browse Inodes in table", pane='new')
def graph_network_inodes(query, result):
# Run the query to get a list of inodes
dbh = DB.DBO(self.case)
dbh.execute(sql)
inode_list = []
for row in dbh:
inode_list.append(row['Inode'])
dbh.execute("select inode_id, inode from inode where inode_id in (%s)", ",".join([str(x) for x in inode_list]))
inode_list = []
# TODO: Replace this with a call to the method in IPID.py
for row in dbh:
#path, inode, inode_id = fsfd.lookup(inode_id=row['Inode'])
inode_id = row["inode_id"]
inode = row["inode"]
if "|S" in inode:
inode_parts = inode.split("|")
for i, p in enumerate(inode_parts):
if p.startswith("S"):
try:
if "/" not in p:
inode_list.append("|".join(inode_parts[:i+1]))
else:
streams = p.split("/")
inode_list.append("|".join(inode_parts[:i]) \
+ "|" + streams[0])
inode_list.append("|".join(inode_parts[:i]) \
+ "|S" + streams[1])
except RuntimeError:
pass
dbh.execute("select inode_id from inode where inode in ('%s')", "','".join(inode_list))
inode_list = []
for row in dbh:
inode_list.append(row["inode_id"])
new_query = query.clone()
new_query.default("inode_list", ",".join(set([str(x) for x in inode_list])))
graph_report = Registry.REPORTS.dispatch(family = 'Network Forensics',
report = "IPIDPlot")(None, result)
del new_query["family"]
del new_query["report"]
new_query["family"] = "Network Forensics"
new_query["report"] = "IPIDPlot"
new_query["time_off"] = 30
result.start_form(new_query, pane='parent_pane')
graph_report.form(new_query, result)
result.end_form()
#graph_query = query.clone()
result.toolbar(cb = graph_network_inodes, icon="pen.png", pane='parent',\
tooltip = "Plot the IPID of these Inodes")
return self.name
clear_display_hook(InodeIDType)
class FilenameType(StringType):
hidden = True
LogCompatible = False
def __init__(self, name='Filename', inode_id='inode_id',
basename=False, table='file',
link=None, link_pane=None, case=None, **kwargs):
if not link and not basename:
link = query_type(case=case,
family='Disk Forensics',
report='Browse Filesystem',
__target__='open_tree',open_tree="%s")
## This is true we only display the basename
self.basename = basename
ColumnType.__init__(self,name=name, column=inode_id,
link=link, link_pane=link_pane, table=table, **kwargs)
def render_links_display_hook(self, value,row,result):
if row['link']:
result.text("%s\n->%s" % (value, row['link']), style="red")
display_hooks = [render_links_display_hook, StringType.plain_display_hook,
StringType.link_display_hook]
def order_by(self):
return "concat(file.path, file.name)"
def select(self):
if self.basename:
return "file.link, file.name"
else: return "file.link, concat(file.path,file.name)"
## FIXME: implement filename globbing operators - this should be
    ## much faster than regex or match operators because in matches,
## the SQL translates to 'where concat(path,name) like "..."'. With
## a globbing operator it should be possible to split the glob
## into directory components and therefore create SQL specifically
## using path and name.
def operator_glob(self, column, operator, pattern):
""" Performs a glob operation on the Virtual file system. Wildcards are * and ?"""
directory,filename = os.path.split(pattern)
sql = ''
if directory:
pass
def operator_literal(self, column, operator, pattern):
column = self.escape_column_name(self.column)
return DB.expand("%s in (select inode_id from file where concat(file.path, file.name) %s %r)",
(column, operator, pattern))
def create(self):
return "path TEXT, name TEXT, link TEXT NULL"
class InodeInfo(StringType):
""" Displays inode information from inode_id """
hidden = True
def __init__(self, name='Size', inode_id='inode_id', field='size',
table=None,
link=None, link_pane=None, case=None, **kwargs):
## This is true we only display the basename
self.table = table
self.field = field
ColumnType.__init__(self,name=name, column=inode_id,
link=link, link_pane=link_pane, **kwargs)
def select(self):
return "(select `%s` from inode where inode_id=%s limit 1)" % (self.field, self.escape_column_namet("inode_id"))
def operator_literal(self, column, operator, pattern):
return DB.expand("`%s` in (select inode_id from inode where `%s` %s %r)",
(self.column, self.field, operator, pattern) )
class DeletedType(StateType):
""" This is a column type which shows deleted inodes graphically
"""
hidden = True
states = {'deleted':'deleted', 'allocated':'alloc'}
def __init__(self, **kwargs):
StateType.__init__(self, name='Del', column='status', **kwargs)
self.table = 'file'
def display(self,value, row, result):
""" Callback for rendering deleted items """
tmp=result.__class__(result)
if value=='alloc':
tmp.icon("yes.png")
elif value=='realloc':
tmp.icon("realloc.png")
elif value=='deleted':
tmp.icon("no.png")
else:
tmp.icon("question.png")
return tmp
class BinaryType(StateType):
""" This type defines fields which are either true or false """
states = {'true':'1', 'false':'0', 'set': 1, 'unset':0 }
def display(self,value, row,result):
if value:
return "*"
else:
return " "
class CounterType(IntegerType):
""" This is used to count the total numbers of things (in a group by) """
LogCompatible = False
def __init__(self, name=None, **kwargs):
IntegerType.__init__(self, name=name, column='count', **kwargs)
def select(self):
return "count(*)"
def order_by(self):
return "count"
class PacketType(IntegerType):
""" A Column type which links directly to the packet browser """
LogCompatible = False
def __init__(self, name="Packet", column='packet_id', case=None, **args):
IntegerType.__init__(self, name=name, column=column,
link = query_type(family='Network Forensics',
report="View Packet",
case=case,
__target__='id'), **args)
## Unit tests for the column types.
import unittest,re
import pyflag.tests
class ColumnTypeTests(pyflag.tests.ScannerTest):
""" Column Types """
test_case = "PyFlagTestCase"
test_file = "pyflag_stdimage_0.4.sgz"
subsystem = 'SGZip'
order = 20
offset = "16128s"
def setUp(self):
pyflag.tests.ScannerTest.setUp(self)
import pyflag.UI as UI
import pyflag.FlagFramework as FlagFramework
t = FlagFramework.CaseTable()
t.name = 'dummy'
self.ui = UI.GenericUI()
self.elements = [ IntegerType('IntegerType',column='integer_type', table='dummy'),
StringType('StringType',column='string_type'),
DeletedType( table='dummy'),
TimestampType('TimestampType','timestamp'),
IPType('IPType','source_ip'),
InodeIDType(),
FilenameType(),
]
self.tablename = 'dummy'
t.columns = [ [e, {}] for e in self.elements]
dbh=DB.DBO(self.test_case)
dbh.drop(self.tablename)
t.create(dbh)
def generate_sql(self, filter):
sql = self.ui._make_sql(elements = self.elements, filter_elements=self.elements,
table = self.tablename, case=None, filter = filter)
## Try to run the SQL to make sure its valid:
dbh=DB.DBO(self.test_case)
dbh.execute(sql)
## We are only interested in the where clause:
match = re.search("where \((.*)\) order", sql)
return match.group(1)
def test05FilteringTest(self):
""" Test filters on columns """
self.assertEqual(self.generate_sql("'IntegerType' > 10"),
"(1) and (`dummy`.`integer_type` > '10')")
self.assertEqual(self.generate_sql("'StringType' contains 'Key Word'"),
"(1) and (`dummy`.`string_type` like '%Key Word%')")
self.assertEqual(self.generate_sql("'StringType' matches 'Key Word'"),
"(1) and (`dummy`.`string_type` like 'Key Word')")
self.assertEqual(self.generate_sql("'StringType' regex '[a-z]*'"),
"(1) and (`dummy`.`string_type` rlike '[a-z]*')")
self.assertEqual(self.generate_sql("'DeletedType' is allocated"),
"(1) and (`dummy`.`deleted` = 'alloc')")
self.assertRaises(RuntimeError, self.generate_sql, ("'DeletedType' is foobar")),
self.assertEqual(self.generate_sql("'TimestampType' after 2005-10-10"),
"(1) and (`dummy`.`timestamp` > '2005-10-10 00:00:00')")
self.assertEqual(self.generate_sql("'IPType' netmask 10.10.10.1/24"),
"(1) and ( ( `source_ip` >= 168430081 and `source_ip` <= 168430335 ) )")
self.assertEqual(self.generate_sql("'InodeIDType' annotated FooBar"),
'(1) and (`inode_id`=(select annotate.inode_id from annotate where note like "%FooBar%"))')
## Joined filters:
self.assertEqual(self.generate_sql("InodeIDType contains 'Z|' and TimestampType after 2005-10-10"),
"(1) and ((`inode`.`inode_id` in (select inode_id from inode where inode like '%Z|%')) and `timestamp` > '2005-10-10 00:00:00')")
| gpl-2.0 | -698,840,353,569,850,500 | 37.953724 | 193 | 0.529152 | false |
UrbanCCD-UChicago/plenario | plenario/sensor_network/api/ifttt.py | 1 | 6149 | import json
import time
import uuid
from os import environ
from dateutil.parser import parse
from flask import make_response, request
from plenario.api.common import crossdomain, unknown_object_json_handler
from plenario.api.response import bad_request
from plenario.api.validator import IFTTTValidator, sensor_network_validate
from plenario.sensor_network.api.sensor_networks import get_observation_queries, get_raw_metadata, \
sanitize_validated_args
# dictionary mapping the curated drop-down list name to the correct feature and property
curated_map = {'temperature': 'temperature.temperature'}
# TODO: error list?
@crossdomain(origin='*')
def get_ifttt_observations():
if request.headers.get('IFTTT-Channel-Key') != environ.get('IFTTT_CHANNEL_KEY'):
return make_ifttt_error('incorrect channel key', 401)
input_args = request.json
args = dict()
try:
args['network'] = 'plenario_development'
args['nodes'] = [input_args['triggerFields']['node']]
args['feature'] = curated_map[input_args['triggerFields']['curated_property']].split('.')[0]
args['limit'] = input_args['limit'] if 'limit' in list(input_args.keys()) else 50
args['filter'] = json.dumps({'prop': curated_map[input_args['triggerFields']['curated_property']].split('.')[1],
'op': input_args['triggerFields']['op'],
'val': float(input_args['triggerFields']['val'])})
# pass through the curated input property so we can return it to the user for display purposes
curated_property = input_args['triggerFields']['curated_property']
except (KeyError, ValueError) as err:
return make_ifttt_error(str(err), 400)
# override the normal limit 0 behaviour, which is to apply no limit
if args['limit'] == 0:
return make_ifttt_response([])
fields = ('network', 'nodes', 'feature', 'sensors',
'start_datetime', 'end_datetime', 'limit', 'filter')
validated_args = sensor_network_validate(IFTTTValidator(only=fields), args)
if validated_args.errors:
return bad_request(validated_args.errors)
validated_args.data.update({
'features': [validated_args.data['feature']],
'feature': None
})
validated_args = sanitize_validated_args(validated_args)
observation_queries = get_observation_queries(validated_args)
if type(observation_queries) != list:
return observation_queries
return run_ifttt_queries(observation_queries, curated_property)
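# Sketch (added for illustration, not part of the original module): the shape
# of the trigger payload this endpoint expects from IFTTT. The node id mirrors
# the sample used in ifttt_test_setup below.
_EXAMPLE_TRIGGER_REQUEST = {
    'triggerFields': {
        'node': 'node_dev_1',
        'curated_property': 'temperature',
        'op': 'gt',
        'val': 20,
    },
    'limit': 50,
}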
@crossdomain(origin='*')
def get_ifttt_meta(field):
if request.headers.get('IFTTT-Channel-Key') != environ.get('IFTTT_CHANNEL_KEY'):
return make_ifttt_error('incorrect channel key', 401)
data = []
if field == 'node':
args = {'network': 'plenario_development'}
fields = ('network',)
validated_args = sensor_network_validate(IFTTTValidator(only=fields), args)
data = [{'label': node.id,
'value': node.id} for node in get_raw_metadata('nodes', validated_args)]
elif field == 'curated_property':
data = [{'label': curated_property,
'value': curated_property} for curated_property in list(curated_map.keys())]
return make_ifttt_response(data)
def format_ifttt_observations(obs, curated_property):
obs_response = {
'node': obs.node_id,
'datetime': obs.datetime.isoformat() + '+05:00',
'curated_property': curated_property,
'value': getattr(obs, curated_map[curated_property].split('.')[1]),
'meta': {
'id': uuid.uuid1().hex,
'timestamp': int(time.time())
}
}
return obs_response
def run_ifttt_queries(queries, curated_property):
data = list()
for query, table in queries:
data += [format_ifttt_observations(obs, curated_property) for obs in query.all()]
data.sort(key=lambda x: parse(x['datetime']), reverse=True)
return make_ifttt_response(data)
def make_ifttt_response(data):
resp = {
'data': data
}
resp = make_response(json.dumps(resp, default=unknown_object_json_handler), 200)
resp.headers['Content-Type'] = 'application/json; charset=utf-8'
return resp
def make_ifttt_error(err, status_code):
resp = {
'errors': [{'message': err}]
}
resp = make_response(json.dumps(resp, default=unknown_object_json_handler), status_code)
resp.headers['Content-Type'] = 'application/json; charset=utf-8'
return resp
# ========================
# IFTTT testing endpoints
# ========================
@crossdomain(origin='*')
def ifttt_status():
if request.headers.get('IFTTT-Channel-Key') != environ.get('IFTTT_CHANNEL_KEY'):
return make_ifttt_error('incorrect channel key', 401)
resp = make_response('{}', 200)
resp.headers['Content-Type'] = 'application/json'
return resp
@crossdomain(origin='*')
def ifttt_test_setup():
if request.headers.get('IFTTT-Channel-Key') != environ.get('IFTTT_CHANNEL_KEY'):
return make_ifttt_error('incorrect channel key', 401)
resp = {
'data': {
'samples': {
'triggers': {
'property_comparison': {
'node': 'node_dev_1',
'curated_property': 'temperature',
'op': 'gt',
'val': 0
}
},
'triggerFieldValidations': {
'property_comparison': {
'node': {
'valid': 'node_dev_1',
'invalid': 'invalid_node'
},
'curated_property': {
'valid': 'temperature',
'invalid': 'invalid_property'
}
}
}
}
}
}
resp = make_response(json.dumps(resp, default=unknown_object_json_handler), 200)
resp.headers['Content-Type'] = 'application/json; charset=utf-8'
return resp
| mit | 2,408,303,469,001,245,000 | 34.137143 | 120 | 0.593267 | false |
bycoffe/django-liveblog | models.py | 1 | 1303 | import datetime
from django.db import models
from django.conf import settings
from django.template import loader, Context
from markdown import markdown
blog = __import__(settings.BLOG_APP)
Entry = blog.models.__getattribute__(settings.BLOG_ENTRY_MODEL)
if Entry.objects.count():
default_blog_entry = Entry.objects.all()[0]
else:
default_blog_entry = None
class LiveBlogEntry(models.Model):
pub_date = models.DateTimeField(default=datetime.datetime.now)
body = models.TextField()
body_html = models.TextField(editable=False, blank=True)
blog_entry = models.ForeignKey(Entry,
default=(Entry.objects.all()[0].id
if Entry.objects.count()
else None))
class Meta:
verbose_name_plural = "Live Blog Entries"
ordering = ['-pub_date', ]
def __unicode__(self):
self.sample_size = 100 # Used only in admin.
return '%s: %s %s' % (self.blog_entry.title,
self.body[:self.sample_size],
'...' if len(self.body) > self.sample_size else '')
def save(self, *args, **kwargs):
self.body_html = markdown(self.body)
super(LiveBlogEntry, self).save()
| bsd-3-clause | 335,256,228,754,973,900 | 34.216216 | 81 | 0.584804 | false |
cgmb/d2lmf | d2lmf/d2lmf.py | 1 | 11079 | # -*- coding: utf-8 -*-
# Copyright (C) 2015-2018 Cordell Bloor
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""d2lmf.d2lmf: provides entry point main()."""
from __future__ import print_function
import argparse
import os
import errno
import shutil
import sys
__version__ = "1.0.0"
def makedirs_exist(path):
"""
Makes a directory at the given path without raising an error if it already exists
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def copytree_exist(src, dst):
"""
Copies a directory tree at the given path into the destination directory
without raising an error if the destination already exists
"""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d)
else:
shutil.copy2(s, d)
def dir_empty_or_nonexistent(folder):
try:
return len(os.listdir(folder)) == 0
except OSError as e:
if e.errno != errno.ENOENT:
raise
return True
class ParserError(Exception):
pass
def parse_submission_dirname(dirname):
"""
Parses a directory name in the form '<id_number> - <student_name> - <timestamp>'
"""
seperator = ' - '
tokens = dirname.split(seperator)
if len(tokens) < 3:
raise ParserError('Expected hyphen-separated id, name and timestamp'
' in "%s"' % dirname)
id_number = tokens[0]
# we'll assume the extra hyphens are a part of the student's name
student_name = seperator.join(tokens[1:-1])
# ':' is not valid in NTFS filenames, so on Windows the time will have
# a '_' where there should be a ':'
timestamp = tokens[-1].replace('_',':')
return (id_number, student_name, timestamp)
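# Sketch (not part of the original module): the D2L folder naming convention
# the parser above expects. The names are made up for illustration.
def _example_parse_submission_dirname():
    id_number, student, timestamp = parse_submission_dirname(
        "12345 - Jane Doe - Sep 29, 2015 4_17 PM")
    # id_number == "12345", student == "Jane Doe",
    # timestamp == "Sep 29, 2015 4:17 PM" (underscore restored to a colon)
    return id_number, student, timestamp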
def merge(src, dest):
"""
Merges the src folder into the dest folder
"""
vprint('Merging "%s" into "%s"' % (src, dest))
for src_root, dirs, files in os.walk(src):
dest_root = src_root.replace(src, dest, 1)
if not os.path.exists(dest_root):
os.makedirs(dest_root)
for f in files:
src_file = os.path.join(src_root, f)
dest_file = os.path.join(dest_root, f)
if os.path.exists(dest_file):
os.remove(dest_file)
shutil.move(src_file, dest_root)
shutil.rmtree(src)
def rename(input_folder, seperator):
"""
Rename all child folders, using their complicated D2L-given name to infer
the submitter's name. Use the submitter's name to create a short, easy
name for a folder to move the data to.
There may be multiple folders created by the same submitter, as they make
one for each submission. We'll merge those together, overwriting files
from the oldest with files from the newest whenever there's a conflict.
"""
from operator import itemgetter
from datetime import datetime
submissions = []
for name in os.listdir(input_folder):
if os.path.isdir(os.path.join(input_folder, name)):
try:
id_num, student, timestamp = parse_submission_dirname(name)
parsed_timestamp = datetime.strptime(timestamp,
'%b %d, %Y %I:%M %p') # Sep 29, 2015 4:17 PM
shortname = student.replace(' ', seperator)
submissions.append((name, shortname, parsed_timestamp))
except (ParserError,ValueError) as e:
print(e, file=sys.stderr)
# sort by student name, then by date
submissions.sort(key=itemgetter(1,2))
for dirname, student_name, timestamp in submissions:
try:
oldpath = os.path.join(input_folder, dirname)
newpath = os.path.join(input_folder, student_name)
if os.path.exists(newpath):
merge(oldpath, newpath)
else:
os.rename(oldpath, newpath)
except OSError as e:
print(e, file=sys.stderr)
print('Failed to merge "%s"' % oldpath, file=sys.stderr)
def extract_nested(folder):
"""
    Unzip, untar, unrar, or otherwise extract any archive file found in the student submission.
"""
import patoolib
supported_suffixes = ('.zip', '.rar', '.tar.gz', '.tgz', '.tar.bz2',
'.tar.xz', '.7z', '.tar')
for root, dirs, files in os.walk(folder):
for f in files:
if f.endswith(supported_suffixes):
try:
archive = os.path.join(root, f)
vprint('Extracting archive: "%s"' % archive)
patoolib.extract_archive(archive, verbosity=-1,
interactive=False, outdir=root)
os.remove(archive)
except patoolib.util.PatoolError as e:
print(e, file=sys.stderr)
print('Failed to extract "%s"' % archive, file=sys.stderr)
def collapse_lonely(folder):
"""
Collapse 'lonely' folders into their parents. These are folders that are
needlessly nested. They have no sibling files or folders, so their existence
    does not separate their contents from anything.
"""
for submission in os.listdir(folder):
submission_path = os.path.join(folder, submission)
if os.path.isdir(submission_path):
submitted_files = os.listdir(submission_path)
if len(submitted_files) == 1:
submitted_file_path = os.path.join(submission_path, submitted_files[0])
if os.path.isdir(submitted_file_path):
vprint('Collapsing directory into parent: "%s"' % submitted_file_path)
for f in os.listdir(submitted_file_path):
f_path = os.path.join(submitted_file_path, f)
shutil.move(f_path, submission_path)
os.rmdir(submitted_file_path)
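# Sketch (not part of the original module): what collapse_lonely does to a
# needlessly nested submission -- "Jane_Doe/assignment1/main.py" becomes
# "Jane_Doe/main.py". The names are made up for illustration.
def _example_collapse_lonely():
    import tempfile
    setup_vprint(argparse.Namespace(verbose=False))  # collapse_lonely calls vprint
    root = tempfile.mkdtemp()
    nested = os.path.join(root, 'Jane_Doe', 'assignment1')
    os.makedirs(nested)
    open(os.path.join(nested, 'main.py'), 'w').close()
    collapse_lonely(root)
    return os.listdir(os.path.join(root, 'Jane_Doe'))  # ['main.py']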
def clean_junk(folder):
"""
Deletes useless files from the given directory tree
"""
for root, dirs, files in os.walk(folder):
for f in files:
if f in ['.DS_Store']:
try:
junk = os.path.join(root, f)
vprint('Removing: "%s"' % junk)
os.remove(junk)
except OSError as e:
print(e, file=sys.stderr)
print('Failed to remove "%s"' % junk, file=sys.stderr)
for d in dirs:
if d in ['__MACOSX']:
try:
junk = os.path.join(root, d)
vprint('Removing: "%s"' % junk)
shutil.rmtree(junk)
except (shutil.Error,OSError) as e:
print(e, file=sys.stderr)
print('Failed to remove "%s"' % junk, file=sys.stderr)
class ExtractError(Exception):
pass
def extract(args):
import zipfile
if not dir_empty_or_nonexistent(args.output_folder):
raise ExtractError('Output folder must be empty')
if os.path.isdir(args.input_path):
copytree_exist(args.input_path, args.output_folder)
else:
makedirs_exist(args.output_folder)
with zipfile.ZipFile(args.input_path, 'r') as z:
z.extractall(args.output_folder)
if args.extract_nested:
extract_nested(args.output_folder)
if args.junk:
clean_junk(args.output_folder)
if args.collapse:
collapse_lonely(args.output_folder)
if args.merge:
rename(args.output_folder, args.seperator)
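# Sketch (not part of the original module): driving the extractor from Python
# rather than through the console script. The file and folder names are
# assumptions for illustration.
def _example_extract_call():
    args = argparse.Namespace(
        input_path='Assignment 1 Download.zip', output_folder='marking/a1',
        recommended=True, extract_nested=False, junk=False, collapse=False,
        merge=False, seperator='_', verbose=False)
    setup_vprint(args)      # defines the module-level vprint used throughout
    expand_aliases(args)    # --recommended switches on -xjcm
    extract(args)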
def setup_vprint(args):
"""
Defines the function vprint, which only prints when --verbose is set
"""
global vprint
vprint = print if args.verbose else lambda *a, **k: None
def expand_aliases(args):
"""
Expands all arguments that are aliases for collections of other arguments.
"""
if args.recommended:
args.extract_nested = True
args.junk = True
args.collapse = True
args.merge = True
def main():
parser = argparse.ArgumentParser(prog='d2lmf',
description='d2lmf is a suite of tools to help mark assignments '
'submitted to D2L.')
parser.add_argument('-v','--verbose',
action='store_true',
help='Display more information about files being changed.')
parser.add_argument('--version', action='version',
version='%(prog)s ' + __version__)
subparsers = parser.add_subparsers(help='')
extract_parser = subparsers.add_parser('extract',
help='Extract student submissions from the D2L zip file and '
'optionally process them to be easier to work with.')
extract_parser.add_argument('input_path',
help='The zip file or unzipped directory to extract data from.')
extract_parser.add_argument('output_folder',
help='The folder in which to put extracted data.')
extract_parser.add_argument('-R','--recommended',
action='store_true',
help='Use the recommended extraction settings. This is an alias '
'for -xjcm.')
extract_parser.add_argument('-x','--extract-nested',
action='store_true',
help='Uses command-line tools to attempt to extract submitted '
'archive files, like zip files, tar files, rar files and 7zip '
'files.')
extract_parser.add_argument('-j','--junk',
action='store_true',
help='Clean up any unnecessary files and folders in the '
"submission, like '.DS_Store'.")
extract_parser.add_argument('-c','--collapse',
action='store_true',
help='Collapse pointless subdirectories whose parent directory '
'contains nothing else.')
extract_parser.add_argument('-m','--merge',
action='store_true',
help="Merge all of a student's submissions into a single folder.")
extract_parser.add_argument('-s','--seperator', default='_',
help="The seperator to replace spaces in the merged folder's name.")
extract_parser.add_argument('-v','--verbose',
action='store_true',
help='Display more information about files being changed.')
extract_parser.set_defaults(func=extract)
args = parser.parse_args()
setup_vprint(args)
expand_aliases(args)
try:
args.func(args)
except ExtractError as e:
print(e, file=sys.stderr)
sys.exit(1)
| gpl-3.0 | -6,830,093,550,474,210,000 | 37.203448 | 90 | 0.601498 | false |
codingforentrepreneurs/DjangoGap | src/postings/migrations/0003_auto_20141113_2257.py | 1 | 1504 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('postings', '0002_auto_20141111_0540'),
]
operations = [
migrations.AlterModelOptions(
name='posting',
options={'ordering': ['-updated', '-timestamp']},
),
migrations.RemoveField(
model_name='posting',
name='post',
),
migrations.AddField(
model_name='posting',
name='title',
field=models.CharField(default=b'Title', max_length=200),
preserve_default=True,
),
migrations.AddField(
model_name='posting',
name='url',
field=models.URLField(default=b'http://youtube.com/', max_length=400),
preserve_default=True,
),
migrations.AlterField(
model_name='posting',
name='timestamp',
field=models.DateTimeField(default=datetime.datetime(2014, 11, 13, 22, 57, 38, 90833, tzinfo=utc), auto_now_add=True),
preserve_default=True,
),
migrations.AlterField(
model_name='posting',
name='updated',
field=models.DateTimeField(default=datetime.datetime(2014, 11, 13, 22, 57, 38, 90874, tzinfo=utc), auto_now=True),
preserve_default=True,
),
]
| gpl-2.0 | 6,243,464,691,273,709,000 | 30.333333 | 130 | 0.563165 | false |